From 238b430ec0446d08a8fd513ae7b4000b5996fd69 Mon Sep 17 00:00:00 2001 From: wolfbeast Date: Tue, 20 Nov 2018 19:59:25 +0100 Subject: Port WebP decoder changes. This breaks animated WebP for the moment, but adds QCMS color management and lexer changes. Tag #831 --- image/decoders/nsWebPDecoder.cpp | 625 +++++++++++++++++++++++---------------- image/decoders/nsWebPDecoder.h | 56 ++-- 2 files changed, 408 insertions(+), 273 deletions(-) diff --git a/image/decoders/nsWebPDecoder.cpp b/image/decoders/nsWebPDecoder.cpp index 6ed2c3e9c..4d6566486 100644 --- a/image/decoders/nsWebPDecoder.cpp +++ b/image/decoders/nsWebPDecoder.cpp @@ -19,10 +19,6 @@ static LazyLogModule sWebPLog("WebPDecoder"); nsWebPDecoder::nsWebPDecoder(RasterImage* aImage) : Decoder(aImage) - , mLexer(Transition::ToUnbuffered(State::FINISHED_WEBP_DATA, - State::WEBP_DATA, - SIZE_MAX), - Transition::TerminateSuccess()) , mDecoder(nullptr) , mBlend(BlendMethod::OVER) , mDisposal(DisposalMethod::KEEP) @@ -30,6 +26,13 @@ nsWebPDecoder::nsWebPDecoder(RasterImage* aImage) , mFormat(SurfaceFormat::B8G8R8X8) , mLastRow(0) , mCurrentFrame(0) + , mData(nullptr) + , mLength(0) + , mIteratorComplete(false) + , mNeedDemuxer(true) + , mGotColorProfile(false) + , mInProfile(nullptr) + , mTransform(nullptr) { MOZ_LOG(sWebPLog, LogLevel::Debug, ("[this=%p] nsWebPDecoder::nsWebPDecoder", this)); @@ -39,27 +42,157 @@ nsWebPDecoder::~nsWebPDecoder() { MOZ_LOG(sWebPLog, LogLevel::Debug, ("[this=%p] nsWebPDecoder::~nsWebPDecoder", this)); - WebPIDelete(mDecoder); + if (mDecoder) { + WebPIDelete(mDecoder); + WebPFreeDecBuffer(&mBuffer); + } + if (mInProfile) { + // mTransform belongs to us only if mInProfile is non-null + if (mTransform) { + qcms_transform_release(mTransform); + } + qcms_profile_release(mInProfile); + } +} + +LexerResult +nsWebPDecoder::ReadData() +{ + MOZ_ASSERT(mData); + MOZ_ASSERT(mLength > 0); + + WebPDemuxer* demuxer = nullptr; + bool complete = mIteratorComplete; + + if (mNeedDemuxer) { + WebPDemuxState state; + WebPData fragment; + fragment.bytes = mData; + fragment.size = mLength; + + demuxer = WebPDemuxPartial(&fragment, &state); + if (state == WEBP_DEMUX_PARSE_ERROR) { + MOZ_LOG(sWebPLog, LogLevel::Error, + ("[this=%p] nsWebPDecoder::ReadData -- demux parse error\n", this)); + WebPDemuxDelete(demuxer); + return LexerResult(TerminalState::FAILURE); + } + + if (state == WEBP_DEMUX_PARSING_HEADER) { + WebPDemuxDelete(demuxer); + return LexerResult(Yield::NEED_MORE_DATA); + } + + if (!demuxer) { + MOZ_LOG(sWebPLog, LogLevel::Error, + ("[this=%p] nsWebPDecoder::ReadData -- no demuxer\n", this)); + return LexerResult(TerminalState::FAILURE); + } + + complete = complete || state == WEBP_DEMUX_DONE; + } + + LexerResult rv(TerminalState::FAILURE); + if (!HasSize()) { + rv = ReadHeader(demuxer, complete); + } else { + rv = ReadPayload(demuxer, complete); + } + + WebPDemuxDelete(demuxer); + return rv; } LexerResult nsWebPDecoder::DoDecode(SourceBufferIterator& aIterator, IResumable* aOnResume) +{ + while (true) { + SourceBufferIterator::State state = SourceBufferIterator::COMPLETE; + if (!mIteratorComplete) { + state = aIterator.AdvanceOrScheduleResume(SIZE_MAX, aOnResume); + + // We need to remember since we can't advance a complete iterator. 
+ mIteratorComplete = state == SourceBufferIterator::COMPLETE; + } + + if (state == SourceBufferIterator::WAITING) { + return LexerResult(Yield::NEED_MORE_DATA); + } + + LexerResult rv = UpdateBuffer(aIterator, state); + if (rv.is() && rv.as() == Yield::NEED_MORE_DATA) { + // We need to check the iterator to see if more is available before + // giving up unless we are already complete. + if (mIteratorComplete) { + MOZ_LOG(sWebPLog, LogLevel::Error, + ("[this=%p] nsWebPDecoder::DoDecode -- read all data, " + "but needs more\n", this)); + return LexerResult(TerminalState::FAILURE); + } + continue; + } + + return rv; + } +} + +LexerResult +nsWebPDecoder::UpdateBuffer(SourceBufferIterator& aIterator, + SourceBufferIterator::State aState) { MOZ_ASSERT(!HasError(), "Shouldn't call DoDecode after error!"); - return mLexer.Lex(aIterator, aOnResume, - [=](State aState, const char* aData, size_t aLength) { - switch (aState) { - case State::WEBP_DATA: - if (!HasSize()) { - return ReadHeader(aData, aLength); - } - return ReadPayload(aData, aLength); - case State::FINISHED_WEBP_DATA: - return FinishedData(); + switch (aState) { + case SourceBufferIterator::READY: + if (!mData) { + // For as long as we hold onto an iterator, we know the data pointers + // to the chunks cannot change underneath us, so save the pointer to + // the first block. + MOZ_ASSERT(mLength == 0); + mData = reinterpret_cast(aIterator.Data()); + } + mLength += aIterator.Length(); + return ReadData(); + case SourceBufferIterator::COMPLETE: + return ReadData(); + default: + MOZ_LOG(sWebPLog, LogLevel::Error, + ("[this=%p] nsWebPDecoder::DoDecode -- bad state\n", this)); + return LexerResult(TerminalState::FAILURE); + } + + // We need to buffer. If we have no data buffered, we need to get everything + // from the first chunk of the source buffer before appending the new data. + if (mBufferedData.empty()) { + MOZ_ASSERT(mData); + MOZ_ASSERT(mLength > 0); + + if (!mBufferedData.append(mData, mLength)) { + MOZ_LOG(sWebPLog, LogLevel::Error, + ("[this=%p] nsWebPDecoder::DoDecode -- oom, initialize %zu\n", + this, mLength)); + return LexerResult(TerminalState::FAILURE); } - MOZ_CRASH("Unknown State"); - }); + + MOZ_LOG(sWebPLog, LogLevel::Debug, + ("[this=%p] nsWebPDecoder::DoDecode -- buffered %zu bytes\n", + this, mLength)); + } + + // Append the incremental data from the iterator. 
+ if (!mBufferedData.append(aIterator.Data(), aIterator.Length())) { + MOZ_LOG(sWebPLog, LogLevel::Error, + ("[this=%p] nsWebPDecoder::DoDecode -- oom, append %zu on %zu\n", + this, aIterator.Length(), mBufferedData.length())); + return LexerResult(TerminalState::FAILURE); + } + + MOZ_LOG(sWebPLog, LogLevel::Debug, + ("[this=%p] nsWebPDecoder::DoDecode -- buffered %zu -> %zu bytes\n", + this, aIterator.Length(), mBufferedData.length())); + mData = mBufferedData.begin(); + mLength = mBufferedData.length(); + return ReadData(); } nsresult @@ -69,13 +202,22 @@ nsWebPDecoder::CreateFrame(const nsIntRect& aFrameRect) MOZ_ASSERT(!mDecoder); MOZ_LOG(sWebPLog, LogLevel::Debug, - ("[this=%p] nsWebPDecoder::CreateFrame -- frame %u, %d x %d\n", - this, mCurrentFrame, aFrameRect.width, aFrameRect.height)); + ("[this=%p] nsWebPDecoder::CreateFrame -- frame %u, (%d, %d) %d x %d\n", + this, mCurrentFrame, aFrameRect.x, aFrameRect.y, + aFrameRect.width, aFrameRect.height)); + + if (aFrameRect.width <= 0 || aFrameRect.height <= 0) { + MOZ_LOG(sWebPLog, LogLevel::Error, + ("[this=%p] nsWebPDecoder::CreateFrame -- bad frame rect\n", + this)); + return NS_ERROR_FAILURE; + } // If this is our first frame in an animation and it doesn't cover the // full frame, then we are transparent even if there is no alpha if (mCurrentFrame == 0 && !aFrameRect.IsEqualEdges(FullFrame())) { MOZ_ASSERT(HasAnimation()); + mFormat = SurfaceFormat::B8G8R8A8; PostHasTransparency(); } @@ -90,16 +232,18 @@ nsWebPDecoder::CreateFrame(const nsIntRect& aFrameRect) return NS_ERROR_FAILURE; } + SurfacePipeFlags pipeFlags = SurfacePipeFlags(); + Maybe pipe = SurfacePipeFactory::CreateSurfacePipe(this, - mCurrentFrame, Size(), OutputSize(), aFrameRect, - mFormat, SurfacePipeFlags()); + mCurrentFrame, Size(), OutputSize(), aFrameRect, mFormat, pipeFlags); if (!pipe) { MOZ_LOG(sWebPLog, LogLevel::Error, ("[this=%p] nsWebPDecoder::CreateFrame -- no pipe\n", this)); return NS_ERROR_FAILURE; } - mPipe = Move(*pipe); + mFrameRect = aFrameRect; + mPipe = std::move(*pipe); return NS_OK; } @@ -118,154 +262,149 @@ nsWebPDecoder::EndFrame() this, mCurrentFrame, (int)opacity, (int)mDisposal, mTimeout.AsEncodedValueDeprecated(), (int)mBlend)); - PostFrameStop(opacity, mDisposal, mTimeout, mBlend); - WebPFreeDecBuffer(&mBuffer); + PostFrameStop(opacity); WebPIDelete(mDecoder); + WebPFreeDecBuffer(&mBuffer); mDecoder = nullptr; mLastRow = 0; ++mCurrentFrame; } -nsresult -nsWebPDecoder::GetDataBuffer(const uint8_t*& aData, size_t& aLength) +void +nsWebPDecoder::ApplyColorProfile(const char* aProfile, size_t aLength) { - if (!mData.empty() && mData.begin() != aData) { - if (!mData.append(aData, aLength)) { - MOZ_LOG(sWebPLog, LogLevel::Error, - ("[this=%p] nsWebPDecoder::GetDataBuffer -- oom, append %zu on %zu\n", - this, aLength, mData.length())); - return NS_ERROR_OUT_OF_MEMORY; - } - aData = mData.begin(); - aLength = mData.length(); + MOZ_ASSERT(!mGotColorProfile); + mGotColorProfile = true; + + if (GetSurfaceFlags() & SurfaceFlags::NO_COLORSPACE_CONVERSION) { + return; } - return NS_OK; -} -nsresult -nsWebPDecoder::SaveDataBuffer(const uint8_t* aData, size_t aLength) -{ - if (mData.empty() && !mData.append(aData, aLength)) { + auto mode = gfxPlatform::GetCMSMode(); + if (mode == eCMSMode_Off || (mode == eCMSMode_TaggedOnly && !aProfile)) { + return; + } + + if (!aProfile || !gfxPlatform::GetCMSOutputProfile()) { + MOZ_LOG(sWebPLog, LogLevel::Debug, + ("[this=%p] nsWebPDecoder::ApplyColorProfile -- not tagged or no output " + "profile , use sRGB 
transform\n", this)); + mTransform = gfxPlatform::GetCMSRGBATransform(); + return; + } + + mInProfile = qcms_profile_from_memory(aProfile, aLength); + if (!mInProfile) { MOZ_LOG(sWebPLog, LogLevel::Error, - ("[this=%p] nsWebPDecoder::SaveDataBuffer -- oom, append %zu on %zu\n", - this, aLength, mData.length())); - return NS_ERROR_OUT_OF_MEMORY; + ("[this=%p] nsWebPDecoder::ApplyColorProfile -- bad color profile\n", + this)); + return; } - return NS_OK; + + // Calculate rendering intent. + int intent = gfxPlatform::GetRenderingIntent(); + if (intent == -1) { + intent = qcms_profile_get_rendering_intent(mInProfile); + } + + // Create the color management transform. + mTransform = qcms_transform_create(mInProfile, + QCMS_DATA_RGBA_8, + gfxPlatform::GetCMSOutputProfile(), + QCMS_DATA_RGBA_8, + (qcms_intent)intent); + MOZ_LOG(sWebPLog, LogLevel::Debug, + ("[this=%p] nsWebPDecoder::ApplyColorProfile -- use tagged " + "transform\n", this)); } -LexerTransition -nsWebPDecoder::ReadHeader(const char* aData, size_t aLength) +LexerResult +nsWebPDecoder::ReadHeader(WebPDemuxer* aDemuxer, + bool aIsComplete) { + MOZ_ASSERT(aDemuxer); + MOZ_LOG(sWebPLog, LogLevel::Debug, - ("[this=%p] nsWebPDecoder::ReadHeader -- %zu bytes\n", this, aLength)); - - // XXX(aosmond): In an ideal world, we could request the lexer to do this - // buffering for us (and in turn the underlying SourceBuffer). That way we - // could avoid extra copies during the decode and just do - // SourceBuffer::Compact on each iteration. For a typical WebP image we - // can hope that we will get the full header in the first packet, but - // for animated images we will end up buffering the whole stream if it - // not already fully received and contiguous. - auto data = (const uint8_t*)aData; - size_t length = aLength; - if (NS_FAILED(GetDataBuffer(data, length))) { - return Transition::TerminateFailure(); - } + ("[this=%p] nsWebPDecoder::ReadHeader -- %zu bytes\n", this, mLength)); + + uint32_t flags = WebPDemuxGetI(aDemuxer, WEBP_FF_FORMAT_FLAGS); - WebPBitstreamFeatures features; - VP8StatusCode status = WebPGetFeatures(data, length, &features); - switch (status) { - case VP8_STATUS_OK: - break; - case VP8_STATUS_NOT_ENOUGH_DATA: - if (NS_FAILED(SaveDataBuffer(data, length))) { - return Transition::TerminateFailure(); + if (!IsMetadataDecode() && !mGotColorProfile) { + if (flags & WebPFeatureFlags::ICCP_FLAG) { + WebPChunkIterator iter; + if (!WebPDemuxGetChunk(aDemuxer, "ICCP", 1, &iter)) { + return aIsComplete ? LexerResult(TerminalState::FAILURE) + : LexerResult(Yield::NEED_MORE_DATA); } - return Transition::ContinueUnbuffered(State::WEBP_DATA); - default: - MOZ_LOG(sWebPLog, LogLevel::Error, - ("[this=%p] nsWebPDecoder::ReadHeader -- parse error %d\n", - this, status)); - return Transition::TerminateFailure(); + + ApplyColorProfile(reinterpret_cast(iter.chunk.bytes), + iter.chunk.size); + WebPDemuxReleaseChunkIterator(&iter); + } else { + ApplyColorProfile(nullptr, 0); + } } - if (features.has_animation) { + if (flags & WebPFeatureFlags::ANIMATION_FLAG) { // A metadata decode expects to get the correct first frame timeout which // sadly is not provided by the normal WebP header parsing. 
- WebPDemuxState state; - WebPData fragment; - fragment.bytes = data; - fragment.size = length; - WebPDemuxer* demuxer = WebPDemuxPartial(&fragment, &state); - if (!demuxer || state == WEBP_DEMUX_PARSE_ERROR) { - MOZ_LOG(sWebPLog, LogLevel::Error, - ("[this=%p] nsWebPDecoder::ReadHeader -- demux parse error\n", this)); - WebPDemuxDelete(demuxer); - return Transition::TerminateFailure(); - } - WebPIterator iter; - if (!WebPDemuxGetFrame(demuxer, 1, &iter)) { - WebPDemuxDelete(demuxer); - if (state == WEBP_DEMUX_DONE) { - MOZ_LOG(sWebPLog, LogLevel::Error, - ("[this=%p] nsWebPDecoder::ReadHeader -- demux parse error\n", - this)); - return Transition::TerminateFailure(); - } - if (NS_FAILED(SaveDataBuffer(data, length))) { - return Transition::TerminateFailure(); - } - return Transition::ContinueUnbuffered(State::WEBP_DATA); + if (!WebPDemuxGetFrame(aDemuxer, 1, &iter)) { + return aIsComplete ? LexerResult(TerminalState::FAILURE) + : LexerResult(Yield::NEED_MORE_DATA); } PostIsAnimated(FrameTimeout::FromRawMilliseconds(iter.duration)); WebPDemuxReleaseIterator(&iter); - WebPDemuxDelete(demuxer); + } else { + // Single frames don't need a demuxer to be created. + mNeedDemuxer = false; } - MOZ_LOG(sWebPLog, LogLevel::Debug, - ("[this=%p] nsWebPDecoder::ReadHeader -- %d x %d, alpha %d, " - "animation %d, format %d, metadata decode %d, first frame decode %d\n", - this, features.width, features.height, features.has_alpha, - features.has_animation, features.format, IsMetadataDecode(), - IsFirstFrameDecode())); - - PostSize(features.width, features.height); - if (features.has_alpha) { + uint32_t width = WebPDemuxGetI(aDemuxer, WEBP_FF_CANVAS_WIDTH); + uint32_t height = WebPDemuxGetI(aDemuxer, WEBP_FF_CANVAS_HEIGHT); + if (width > INT32_MAX || height > INT32_MAX) { + return LexerResult(TerminalState::FAILURE); + } + + PostSize(width, height); + + bool alpha = flags & WebPFeatureFlags::ALPHA_FLAG; + if (alpha) { mFormat = SurfaceFormat::B8G8R8A8; PostHasTransparency(); } + MOZ_LOG(sWebPLog, LogLevel::Debug, + ("[this=%p] nsWebPDecoder::ReadHeader -- %u x %u, alpha %d, " + "animation %d, metadata decode %d, first frame decode %d\n", + this, width, height, alpha, HasAnimation(), + IsMetadataDecode(), IsFirstFrameDecode())); + if (IsMetadataDecode()) { - return Transition::TerminateSuccess(); + return LexerResult(TerminalState::SUCCESS); } - auto transition = ReadPayload((const char*)data, length); - if (!features.has_animation) { - mData.clearAndFree(); - } - return transition; + return ReadPayload(aDemuxer, aIsComplete); } -LexerTransition -nsWebPDecoder::ReadPayload(const char* aData, size_t aLength) +LexerResult +nsWebPDecoder::ReadPayload(WebPDemuxer* aDemuxer, + bool aIsComplete) { - auto data = (const uint8_t*)aData; if (!HasAnimation()) { - auto rv = ReadSingle(data, aLength, true, FullFrame()); - if (rv.NextStateIsTerminal() && - rv.NextStateAsTerminal() == TerminalState::SUCCESS) { + auto rv = ReadSingle(mData, mLength, FullFrame()); + if (rv.is() && + rv.as() == TerminalState::SUCCESS) { PostDecodeDone(); } return rv; } - return ReadMultiple(data, aLength); + return ReadMultiple(aDemuxer, aIsComplete); } -LexerTransition -nsWebPDecoder::ReadSingle(const uint8_t* aData, size_t aLength, bool aAppend, const IntRect& aFrameRect) +LexerResult +nsWebPDecoder::ReadSingle(const uint8_t* aData, size_t aLength, const IntRect& aFrameRect) { MOZ_ASSERT(!IsMetadataDecode()); MOZ_ASSERT(aData); @@ -275,125 +414,120 @@ nsWebPDecoder::ReadSingle(const uint8_t* aData, size_t aLength, bool aAppend, co 
("[this=%p] nsWebPDecoder::ReadSingle -- %zu bytes\n", this, aLength)); if (!mDecoder && NS_FAILED(CreateFrame(aFrameRect))) { - return Transition::TerminateFailure(); + return LexerResult(TerminalState::FAILURE); } - // XXX(aosmond): The demux API can be used for single images according to the - // documentation. If WebPIAppend is not any more efficient in its buffering - // than what we do for animated images, we should just combine the use cases. bool complete; - VP8StatusCode status; - if (aAppend) { - status = WebPIAppend(mDecoder, aData, aLength); - } else { - status = WebPIUpdate(mDecoder, aData, aLength); - } - switch (status) { - case VP8_STATUS_OK: - complete = true; - break; - case VP8_STATUS_SUSPENDED: - complete = false; - break; - default: - MOZ_LOG(sWebPLog, LogLevel::Error, - ("[this=%p] nsWebPDecoder::ReadSingle -- append error %d\n", - this, status)); - return Transition::TerminateFailure(); - } + do { + VP8StatusCode status = WebPIUpdate(mDecoder, aData, aLength); + switch (status) { + case VP8_STATUS_OK: + complete = true; + break; + case VP8_STATUS_SUSPENDED: + complete = false; + break; + default: + MOZ_LOG(sWebPLog, LogLevel::Error, + ("[this=%p] nsWebPDecoder::ReadSingle -- append error %d\n", + this, status)); + return LexerResult(TerminalState::FAILURE); + } - int lastRow = -1; - int width = 0; - int height = 0; - int stride = 0; - const uint8_t* rowStart = WebPIDecGetRGB(mDecoder, &lastRow, &width, &height, &stride); - if (!rowStart || lastRow == -1) { - return Transition::ContinueUnbuffered(State::WEBP_DATA); - } + int lastRow = -1; + int width = 0; + int height = 0; + int stride = 0; + uint8_t* rowStart = WebPIDecGetRGB(mDecoder, &lastRow, &width, &height, &stride); - if (width <= 0 || height <= 0 || stride <= 0) { - MOZ_LOG(sWebPLog, LogLevel::Error, - ("[this=%p] nsWebPDecoder::ReadSingle -- bad (w,h,s) = (%d, %d, %d)\n", - this, width, height, stride)); - return Transition::TerminateFailure(); - } + MOZ_LOG(sWebPLog, LogLevel::Debug, + ("[this=%p] nsWebPDecoder::ReadSingle -- complete %d, read %d rows, " + "has %d rows available\n", this, complete, mLastRow, lastRow)); + + if (!rowStart || lastRow == -1 || lastRow == mLastRow) { + return LexerResult(Yield::NEED_MORE_DATA); + } - for (int row = mLastRow; row < lastRow; row++) { - const uint8_t* src = rowStart + row * stride; - auto result = mPipe.WritePixelsToRow([&]() -> NextPixel { - MOZ_ASSERT(mFormat == SurfaceFormat::B8G8R8A8 || src[3] == 0xFF); - const uint32_t pixel = gfxPackedPixel(src[3], src[0], src[1], src[2]); - src += 4; - return AsVariant(pixel); - }); - MOZ_ASSERT(result != WriteState::FAILURE); - MOZ_ASSERT_IF(result == WriteState::FINISHED, complete && row == lastRow - 1); - - if (result == WriteState::FAILURE) { + if (width != mFrameRect.width || height != mFrameRect.height || + stride < mFrameRect.width * 4 || + lastRow > mFrameRect.height) { MOZ_LOG(sWebPLog, LogLevel::Error, - ("[this=%p] nsWebPDecoder::ReadSingle -- write pixels error\n", - this)); - return Transition::TerminateFailure(); + ("[this=%p] nsWebPDecoder::ReadSingle -- bad (w,h,s) = (%d, %d, %d)\n", + this, width, height, stride)); + return LexerResult(TerminalState::FAILURE); } - } - if (mLastRow != lastRow) { - mLastRow = lastRow; + const bool noPremultiply = + bool(GetSurfaceFlags() & SurfaceFlags::NO_PREMULTIPLY_ALPHA); + + for (int row = mLastRow; row < lastRow; row++) { + uint8_t* src = rowStart + row * stride; + if (mTransform) { + qcms_transform_data(mTransform, src, src, width); + } + + WriteState result; + if 
(noPremultiply) { + result = mPipe.WritePixelsToRow([&]() -> NextPixel { + MOZ_ASSERT(mFormat == SurfaceFormat::B8G8R8A8 || src[3] == 0xFF); + const uint32_t pixel = + gfxPackedPixelNoPreMultiply(src[3], src[0], src[1], src[2]); + src += 4; + return AsVariant(pixel); + }); + } else { + result = mPipe.WritePixelsToRow([&]() -> NextPixel { + MOZ_ASSERT(mFormat == SurfaceFormat::B8G8R8A8 || src[3] == 0xFF); + const uint32_t pixel = gfxPackedPixel(src[3], src[0], src[1], src[2]); + src += 4; + return AsVariant(pixel); + }); + } - Maybe invalidRect = mPipe.TakeInvalidRect(); - if (invalidRect) { - PostInvalidation(invalidRect->mInputSpaceRect, - Some(invalidRect->mOutputSpaceRect)); + Maybe invalidRect = mPipe.TakeInvalidRect(); + if (invalidRect) { + PostInvalidation(invalidRect->mInputSpaceRect, + Some(invalidRect->mOutputSpaceRect)); + } + + if (result == WriteState::FAILURE) { + MOZ_LOG(sWebPLog, LogLevel::Error, + ("[this=%p] nsWebPDecoder::ReadSingle -- write pixels error\n", + this)); + return LexerResult(TerminalState::FAILURE); + } + + if (result == WriteState::FINISHED) { + MOZ_ASSERT(row == lastRow - 1, "There was more data to read?"); + complete = true; + break; + } } - } + + mLastRow = lastRow; + } while (!complete); if (!complete) { - return Transition::ContinueUnbuffered(State::WEBP_DATA); + return LexerResult(Yield::NEED_MORE_DATA); } EndFrame(); - return Transition::TerminateSuccess(); + return LexerResult(TerminalState::SUCCESS); } -LexerTransition -nsWebPDecoder::ReadMultiple(const uint8_t* aData, size_t aLength) +LexerResult +nsWebPDecoder::ReadMultiple(WebPDemuxer* aDemuxer, bool aIsComplete) { MOZ_ASSERT(!IsMetadataDecode()); - MOZ_ASSERT(aData); + MOZ_ASSERT(aDemuxer); MOZ_LOG(sWebPLog, LogLevel::Debug, - ("[this=%p] nsWebPDecoder::ReadMultiple -- %zu bytes\n", this, aLength)); - - auto data = aData; - size_t length = aLength; - if (NS_FAILED(GetDataBuffer(data, length))) { - return Transition::TerminateFailure(); - } - - WebPDemuxState state; - WebPData fragment; - fragment.bytes = data; - fragment.size = length; - WebPDemuxer* demuxer = WebPDemuxPartial(&fragment, &state); - if (!demuxer) { - MOZ_LOG(sWebPLog, LogLevel::Error, - ("[this=%p] nsWebPDecoder::ReadMultiple -- create demuxer error\n", - this)); - return Transition::TerminateFailure(); - } - - if (state == WEBP_DEMUX_PARSE_ERROR) { - MOZ_LOG(sWebPLog, LogLevel::Error, - ("[this=%p] nsWebPDecoder::ReadMultiple -- demuxer parse error\n", - this)); - WebPDemuxDelete(demuxer); - return Transition::TerminateFailure(); - } + ("[this=%p] nsWebPDecoder::ReadMultiple\n", this)); - bool complete = false; + bool complete = aIsComplete; WebPIterator iter; - auto rv = Transition::ContinueUnbuffered(State::WEBP_DATA); - if (WebPDemuxGetFrame(demuxer, mCurrentFrame + 1, &iter)) { + auto rv = LexerResult(Yield::NEED_MORE_DATA); + if (WebPDemuxGetFrame(aDemuxer, mCurrentFrame + 1, &iter)) { switch (iter.blend_method) { case WEBP_MUX_BLEND: mBlend = BlendMethod::OVER; @@ -418,51 +552,34 @@ nsWebPDecoder::ReadMultiple(const uint8_t* aData, size_t aLength) break; } - mFormat = iter.has_alpha ? SurfaceFormat::B8G8R8A8 : SurfaceFormat::B8G8R8X8; + mFormat = iter.has_alpha || mCurrentFrame > 0 ? 
SurfaceFormat::B8G8R8A8 + : SurfaceFormat::B8G8R8X8; mTimeout = FrameTimeout::FromRawMilliseconds(iter.duration); nsIntRect frameRect(iter.x_offset, iter.y_offset, iter.width, iter.height); - rv = ReadSingle(iter.fragment.bytes, iter.fragment.size, false, frameRect); - complete = state == WEBP_DEMUX_DONE && !WebPDemuxNextFrame(&iter); + rv = ReadSingle(iter.fragment.bytes, iter.fragment.size, frameRect); + complete = complete && !WebPDemuxNextFrame(&iter); WebPDemuxReleaseIterator(&iter); } - if (rv.NextStateIsTerminal()) { - if (rv.NextStateAsTerminal() == TerminalState::SUCCESS) { - // If we extracted one frame, and it is not the last, we need to yield to - // the lexer to allow the upper layers to acknowledge the frame. - if (!complete && !IsFirstFrameDecode()) { - // The resume point is determined by whether or not we had to buffer. - // If we have yet to buffer, we want to resume at the same point, - // otherwise our internal buffer has everything we need and we want - // to resume having consumed all of the current fragment. - rv = Transition::ContinueUnbufferedAfterYield(State::WEBP_DATA, - mData.empty() ? 0 : aLength); - } else { - uint32_t loopCount = WebPDemuxGetI(demuxer, WEBP_FF_LOOP_COUNT); - - MOZ_LOG(sWebPLog, LogLevel::Debug, - ("[this=%p] nsWebPDecoder::ReadMultiple -- loop count %u\n", - this, loopCount)); - PostDecodeDone(loopCount - 1); - } + if (rv.is() && + rv.as() == TerminalState::SUCCESS) { + // If we extracted one frame, and it is not the last, we need to yield to + // the lexer to allow the upper layers to acknowledge the frame. + if (!complete && !IsFirstFrameDecode()) { + rv = LexerResult(Yield::OUTPUT_AVAILABLE); + } else { + uint32_t loopCount = WebPDemuxGetI(aDemuxer, WEBP_FF_LOOP_COUNT); + + MOZ_LOG(sWebPLog, LogLevel::Debug, + ("[this=%p] nsWebPDecoder::ReadMultiple -- loop count %u\n", + this, loopCount)); + PostDecodeDone(loopCount - 1); } - } else if (NS_FAILED(SaveDataBuffer(data, length))) { - rv = Transition::TerminateFailure(); } - WebPDemuxDelete(demuxer); return rv; } -LexerTransition -nsWebPDecoder::FinishedData() -{ - // Since we set up an unbuffered read for SIZE_MAX bytes, if we actually read - // all that data something is really wrong. - MOZ_ASSERT_UNREACHABLE("Read the entire address space?"); - return Transition::TerminateFailure(); -} - } // namespace image } // namespace mozilla diff --git a/image/decoders/nsWebPDecoder.h b/image/decoders/nsWebPDecoder.h index 5b3951cfc..cdd2849f3 100644 --- a/image/decoders/nsWebPDecoder.h +++ b/image/decoders/nsWebPDecoder.h @@ -16,7 +16,7 @@ namespace mozilla { namespace image { class RasterImage; -class nsWebPDecoder : public Decoder +class nsWebPDecoder final : public Decoder { public: virtual ~nsWebPDecoder(); @@ -31,34 +31,28 @@ private: // Decoders should only be instantiated via DecoderFactory. 
explicit nsWebPDecoder(RasterImage* aImage); - enum class State - { - WEBP_DATA, - FINISHED_WEBP_DATA - }; + void ApplyColorProfile(const char* aProfile, size_t aLength); - LexerTransition ReadHeader(const char* aData, size_t aLength); - LexerTransition ReadPayload(const char* aData, size_t aLength); - LexerTransition FinishedData(); + LexerResult UpdateBuffer(SourceBufferIterator& aIterator, + SourceBufferIterator::State aState); + LexerResult ReadData(); + LexerResult ReadHeader(WebPDemuxer* aDemuxer, bool aIsComplete); + LexerResult ReadPayload(WebPDemuxer* aDemuxer, bool aIsComplete); nsresult CreateFrame(const nsIntRect& aFrameRect); void EndFrame(); - nsresult GetDataBuffer(const uint8_t*& aData, size_t& aLength); - nsresult SaveDataBuffer(const uint8_t* aData, size_t aLength); + LexerResult ReadSingle(const uint8_t* aData, size_t aLength, + const IntRect& aFrameRect); - LexerTransition ReadSingle(const uint8_t* aData, size_t aLength, - bool aAppend, const IntRect& aFrameRect); - - LexerTransition ReadMultiple(const uint8_t* aData, size_t aLength); - - StreamingLexer mLexer; + LexerResult ReadMultiple(WebPDemuxer* aDemuxer, bool aIsComplete); /// The SurfacePipe used to write to the output surface. SurfacePipe mPipe; - /// The buffer used to accumulate data until the complete WebP header is received. - Vector mData; + /// The buffer used to accumulate data until the complete WebP header is + /// received, if and only if the iterator is discontiguous. + Vector mBufferedData; /// The libwebp output buffer descriptor pointing to the decoded data. WebPDecBuffer mBuffer; @@ -78,11 +72,35 @@ private: /// Surface format for the current frame. gfx::SurfaceFormat mFormat; + /// Frame rect for the current frame. + IntRect mFrameRect; + /// The last row of decoded pixels written to mPipe. int mLastRow; /// Number of decoded frames. uint32_t mCurrentFrame; + + /// Pointer to the start of the contiguous encoded image data. + const uint8_t* mData; + + /// Length of data pointed to by mData. + size_t mLength; + + /// True if the iterator has reached its end. + bool mIteratorComplete; + + /// True if this decoding pass requires a WebPDemuxer. + bool mNeedDemuxer; + + /// True if we have setup the color profile for the image. + bool mGotColorProfile; + + /// Color management profile from the ICCP chunk in the image. + qcms_profile* mInProfile; + + /// Color management transform to apply to image data. + qcms_transform* mTransform; }; } // namespace image -- cgit v1.2.3 From 807acf738f63d95fbecfb9129092aaa2379889ba Mon Sep 17 00:00:00 2001 From: wolfbeast Date: Tue, 20 Nov 2018 20:38:49 +0100 Subject: Bug 1462355 - Part 1a. Make imgFrame animation parameters threadsafe. We currently choose to set the animation parameters (blend method, blend rect, disposal method, timeout) in imgFrame::Finish instead of imgFrame::InitForDecoder. The decoders themselves already have access to the necessary information at the time InitForDecoder is called, so there is no reason to do this. Moving the configuration to initialization will allow us to relax the mutex protection on these parameters. This part simply reorganizes imgFrame, and subsequent parts will introduce the necessary changes to SurfacePipe and decoders. 
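For illustration only (not part of the patch): after this change a decoder is expected to hand the animation parameters to imgFrame when the frame is initialized, and Finish() only takes the opacity. A rough sketch of that call pattern, using the AnimationParams struct and the updated imgFrame::InitForDecoder()/Finish() signatures introduced below (imageSize and frameRect are placeholder variables):

  // Animation state is now fixed at construction time instead of in Finish().
  AnimationParams animParams { frameRect,                              // blend rect for this frame
                               FrameTimeout::FromRawMilliseconds(100), // frame timeout
                               /* aFrameNum */ 1,
                               BlendMethod::OVER,
                               DisposalMethod::KEEP };

  RefPtr<imgFrame> frame = new imgFrame();
  if (NS_FAILED(frame->InitForDecoder(imageSize, frameRect,
                                      SurfaceFormat::B8G8R8A8,
                                      /* aPaletteDepth */ 0,
                                      /* aNonPremult */ false,
                                      Some(animParams)))) {
    return; // allocation or surface creation failed
  }

  // ... decode pixel data into the frame ...

  // Finish() no longer carries disposal/timeout/blend arguments.
  frame->Finish(Opacity::SOME_TRANSPARENCY);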
--- image/AnimationParams.h | 45 +++++++++++++++++++++++++++++++ image/imgFrame.cpp | 39 ++++++++++++++------------- image/imgFrame.h | 72 ++++++++++++++++++------------------------------- 3 files changed, 91 insertions(+), 65 deletions(-) create mode 100644 image/AnimationParams.h diff --git a/image/AnimationParams.h b/image/AnimationParams.h new file mode 100644 index 000000000..916ba7e40 --- /dev/null +++ b/image/AnimationParams.h @@ -0,0 +1,45 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef mozilla_image_AnimationParams_h +#define mozilla_image_AnimationParams_h + +#include +#include "mozilla/gfx/Rect.h" +#include "FrameTimeout.h" + +namespace mozilla { +namespace image { + +enum class BlendMethod : int8_t { + // All color components of the frame, including alpha, overwrite the current + // contents of the frame's output buffer region. + SOURCE, + + // The frame should be composited onto the output buffer based on its alpha, + // using a simple OVER operation. + OVER +}; + +enum class DisposalMethod : int8_t { + CLEAR_ALL = -1, // Clear the whole image, revealing what's underneath. + NOT_SPECIFIED, // Leave the frame and let the new frame draw on top. + KEEP, // Leave the frame and let the new frame draw on top. + CLEAR, // Clear the frame's area, revealing what's underneath. + RESTORE_PREVIOUS // Restore the previous (composited) frame. +}; + +struct AnimationParams +{ + gfx::IntRect mBlendRect; + FrameTimeout mTimeout; + uint32_t mFrameNum; + BlendMethod mBlendMethod; + DisposalMethod mDisposalMethod; +}; + +} // namespace image +} // namespace mozilla diff --git a/image/imgFrame.cpp b/image/imgFrame.cpp index 5da2ccec5..af84d8cbb 100644 --- a/image/imgFrame.cpp +++ b/image/imgFrame.cpp @@ -161,13 +161,13 @@ imgFrame::imgFrame() : mMonitor("imgFrame") , mDecoded(0, 0, 0, 0) , mLockCount(0) + , mAborted(false) + , mFinished(false) + , mOptimizable(false) , mTimeout(FrameTimeout::FromRawMilliseconds(100)) , mDisposalMethod(DisposalMethod::NOT_SPECIFIED) , mBlendMethod(BlendMethod::OVER) , mHasNoAlpha(false) - , mAborted(false) - , mFinished(false) - , mOptimizable(false) , mPalettedImageData(nullptr) , mPaletteDepth(0) , mNonPremult(false) @@ -192,7 +192,8 @@ imgFrame::InitForDecoder(const nsIntSize& aImageSize, const nsIntRect& aRect, SurfaceFormat aFormat, uint8_t aPaletteDepth /* = 0 */, - bool aNonPremult /* = false */) + bool aNonPremult /* = false */, + const Maybe& aAnimParams /* = Nothing() */) { // Assert for properties that should be verified by decoders, // warn for properties related to bad content. @@ -205,6 +206,15 @@ imgFrame::InitForDecoder(const nsIntSize& aImageSize, mImageSize = aImageSize; mFrameRect = aRect; + if (aAnimParams) { + mBlendRect = aAnimParams->mBlendRect; + mTimeout = aAnimParams->mTimeout; + mBlendMethod = aAnimParams->mBlendMethod; + mDisposalMethod = aAnimParams->mDisposalMethod; + } else { + mBlendRect = aRect; + } + // We only allow a non-trivial frame rect (i.e., a frame rect that doesn't // cover the entire image) for paletted animation frames. 
We never draw those // frames directly; we just use FrameAnimator to composite them and produce a @@ -242,13 +252,13 @@ imgFrame::InitForDecoder(const nsIntSize& aImageSize, } else { MOZ_ASSERT(!mImageSurface, "Called imgFrame::InitForDecoder() twice?"); - mVBuf = AllocateBufferForImage(mFrameRect.Size(), mFormat); - if (!mVBuf) { + mRawSurface = AllocateBufferForImage(mFrameRect.Size(), mFormat); + if (!mRawSurface) { mAborted = true; return NS_ERROR_OUT_OF_MEMORY; } - mImageSurface = CreateLockedSurface(mVBuf, mFrameRect.Size(), mFormat); + mImageSurface = CreateLockedSurface(mRawSurface, mFrameRect.Size(), mFormat); if (!mImageSurface) { NS_WARNING("Failed to create ImageSurface"); @@ -256,7 +266,7 @@ imgFrame::InitForDecoder(const nsIntSize& aImageSize, return NS_ERROR_OUT_OF_MEMORY; } - if (!ClearSurface(mVBuf, mFrameRect.Size(), mFormat)) { + if (!ClearSurface(mRawSurface, mFrameRect.Size(), mFormat)) { NS_WARNING("Could not clear allocated buffer"); mAborted = true; return NS_ERROR_OUT_OF_MEMORY; @@ -607,12 +617,7 @@ imgFrame::ImageUpdatedInternal(const nsIntRect& aUpdateRect) } void -imgFrame::Finish(Opacity aFrameOpacity /* = Opacity::SOME_TRANSPARENCY */, - DisposalMethod aDisposalMethod /* = DisposalMethod::KEEP */, - FrameTimeout aTimeout - /* = FrameTimeout::FromRawMilliseconds(0) */, - BlendMethod aBlendMethod /* = BlendMethod::OVER */, - const Maybe& aBlendRect /* = Nothing() */) +imgFrame::Finish(Opacity aFrameOpacity /* = Opacity::SOME_TRANSPARENCY */) { MonitorAutoLock lock(mMonitor); MOZ_ASSERT(mLockCount > 0, "Image data should be locked"); @@ -621,10 +626,6 @@ imgFrame::Finish(Opacity aFrameOpacity /* = Opacity::SOME_TRANSPARENCY */, mHasNoAlpha = true; } - mDisposalMethod = aDisposalMethod; - mTimeout = aTimeout; - mBlendMethod = aBlendMethod; - mBlendRect = aBlendRect; ImageUpdatedInternal(GetRect()); mFinished = true; @@ -844,7 +845,7 @@ imgFrame::GetAnimationData() const bool hasAlpha = mFormat == SurfaceFormat::B8G8R8A8; return AnimationData(data, PaletteDataLength(), mTimeout, GetRect(), - mBlendMethod, mBlendRect, mDisposalMethod, hasAlpha); + mBlendMethod, Some(mBlendRect), mDisposalMethod, hasAlpha); } void diff --git a/image/imgFrame.h b/image/imgFrame.h index e864aca7f..32aabacae 100644 --- a/image/imgFrame.h +++ b/image/imgFrame.h @@ -12,6 +12,7 @@ #include "mozilla/Monitor.h" #include "mozilla/Move.h" #include "mozilla/VolatileBuffer.h" +#include "AnimationParams.h" #include "gfxDrawable.h" #include "imgIContainer.h" #include "MainThreadUtils.h" @@ -23,24 +24,6 @@ class ImageRegion; class DrawableFrameRef; class RawAccessFrameRef; -enum class BlendMethod : int8_t { - // All color components of the frame, including alpha, overwrite the current - // contents of the frame's output buffer region. - SOURCE, - - // The frame should be composited onto the output buffer based on its alpha, - // using a simple OVER operation. - OVER -}; - -enum class DisposalMethod : int8_t { - CLEAR_ALL = -1, // Clear the whole image, revealing what's underneath. - NOT_SPECIFIED, // Leave the frame and let the new frame draw on top. - KEEP, // Leave the frame and let the new frame draw on top. - CLEAR, // Clear the frame's area, revealing what's underneath. - RESTORE_PREVIOUS // Restore the previous (composited) frame. 
-}; - enum class Opacity : uint8_t { FULLY_OPAQUE, SOME_TRANSPARENCY @@ -210,14 +193,19 @@ public: const nsIntRect& aRect, SurfaceFormat aFormat, uint8_t aPaletteDepth = 0, - bool aNonPremult = false); + bool aNonPremult = false, + const Maybe& aAnimParams = Nothing()); nsresult InitForDecoder(const nsIntSize& aSize, SurfaceFormat aFormat, uint8_t aPaletteDepth = 0) { - return InitForDecoder(aSize, nsIntRect(0, 0, aSize.width, aSize.height), - aFormat, aPaletteDepth); + nsIntRect frameRect(0, 0, aSize.width, aSize.height); + AnimationParams animParams { frameRect, FrameTimeout::Forever(), + /* aFrameNum */ 1, BlendMethod::OVER, + DisposalMethod::NOT_SPECIFIED }; + return InitForDecoder(aSize, frameRect, + aFormat, aPaletteDepth, false, Some(animParams)); } @@ -268,22 +256,8 @@ public: * RawAccessFrameRef pointing to an imgFrame. * * @param aFrameOpacity Whether this imgFrame is opaque. - * @param aDisposalMethod For animation frames, how this imgFrame is cleared - * from the compositing frame before the next frame is - * displayed. - * @param aTimeout For animation frames, the timeout before the next - * frame is displayed. - * @param aBlendMethod For animation frames, a blending method to be used - * when compositing this frame. - * @param aBlendRect For animation frames, if present, the subrect in - * which @aBlendMethod applies. Outside of this - * subrect, BlendMethod::OVER is always used. */ - void Finish(Opacity aFrameOpacity = Opacity::SOME_TRANSPARENCY, - DisposalMethod aDisposalMethod = DisposalMethod::KEEP, - FrameTimeout aTimeout = FrameTimeout::FromRawMilliseconds(0), - BlendMethod aBlendMethod = BlendMethod::OVER, - const Maybe& aBlendRect = Nothing()); + void Finish(Opacity aFrameOpacity = Opacity::SOME_TRANSPARENCY); /** * Mark this imgFrame as aborted. This informs the imgFrame that if it isn't @@ -322,9 +296,15 @@ public: */ uint32_t GetBytesPerPixel() const { return GetIsPaletted() ? 1 : 4; } - IntSize GetImageSize() const { return mImageSize; } - IntRect GetRect() const { return mFrameRect; } + const IntSize& GetImageSize() const { return mImageSize; } + const IntRect& GetRect() const { return mFrameRect; } IntSize GetSize() const { return mFrameRect.Size(); } + const IntRect& GetBlendRect() const { return mBlendRect; } + IntRect GetBoundedBlendRect() const { return mBlendRect.Intersect(mFrameRect); } + FrameTimeout GetTimeout() const { return mTimeout; } + BlendMethod GetBlendMethod() const { return mBlendMethod; } + DisposalMethod GetDisposalMethod() const { return mDisposalMethod; } + bool FormatHasAlpha() const { return mFormat == SurfaceFormat::B8G8R8A8; } void GetImageData(uint8_t** aData, uint32_t* length) const; uint8_t* GetImageData() const; @@ -406,14 +386,6 @@ private: // data //! Number of RawAccessFrameRefs currently alive for this imgFrame. int32_t mLockCount; - //! The timeout for this frame. - FrameTimeout mTimeout; - - DisposalMethod mDisposalMethod; - BlendMethod mBlendMethod; - Maybe mBlendRect; - SurfaceFormat mFormat; - bool mHasNoAlpha; bool mAborted; bool mFinished; @@ -426,6 +398,14 @@ private: // data IntSize mImageSize; IntRect mFrameRect; + IntRect mBlendRect; + + //! The timeout for this frame. + FrameTimeout mTimeout; + + DisposalMethod mDisposalMethod; + BlendMethod mBlendMethod; + SurfaceFormat mFormat; // The palette and image data for images that are paletted, since Cairo // doesn't support these images. 
-- cgit v1.2.3 From 622098073e132995994fac4d61e3629d08ee1801 Mon Sep 17 00:00:00 2001 From: wolfbeast Date: Wed, 21 Nov 2018 12:57:17 +0100 Subject: Bug 1462355 - Part 1b. Update Decoder and SurfacePipe plumbing to use updated imgFrame methods. --- image/Decoder.cpp | 49 ++++++++++------------- image/Decoder.h | 22 +++++----- image/DownscalingFilter.h | 4 +- image/SurfaceFilters.h | 6 +-- image/SurfacePipe.cpp | 13 +++--- image/SurfacePipe.h | 6 ++- image/SurfacePipeFactory.h | 22 +++++----- image/test/gtest/Common.h | 4 +- image/test/gtest/TestADAM7InterpolatingFilter.cpp | 6 +-- image/test/gtest/TestDeinterlacingFilter.cpp | 6 +-- image/test/gtest/TestDownscalingFilter.cpp | 6 +-- image/test/gtest/TestDownscalingFilterNoSkia.cpp | 2 +- image/test/gtest/TestRemoveFrameRectFilter.cpp | 6 +-- image/test/gtest/TestSurfacePipeIntegration.cpp | 20 ++++----- image/test/gtest/TestSurfaceSink.cpp | 4 +- 15 files changed, 84 insertions(+), 92 deletions(-) diff --git a/image/Decoder.cpp b/image/Decoder.cpp index f9b1034cf..a6fd67c3f 100644 --- a/image/Decoder.cpp +++ b/image/Decoder.cpp @@ -278,14 +278,14 @@ Decoder::Telemetry() const } nsresult -Decoder::AllocateFrame(uint32_t aFrameNum, - const gfx::IntSize& aOutputSize, +Decoder::AllocateFrame(const gfx::IntSize& aOutputSize, const gfx::IntRect& aFrameRect, gfx::SurfaceFormat aFormat, - uint8_t aPaletteDepth) + uint8_t aPaletteDepth, + const Maybe& aAnimParams) { - mCurrentFrame = AllocateFrameInternal(aFrameNum, aOutputSize, aFrameRect, - aFormat, aPaletteDepth, + mCurrentFrame = AllocateFrameInternal(aOutputSize, aFrameRect, aFormat, + aPaletteDepth, aAnimParams, mCurrentFrame.get()); if (mCurrentFrame) { @@ -295,7 +295,7 @@ Decoder::AllocateFrame(uint32_t aFrameNum, // We should now be on |aFrameNum|. (Note that we're comparing the frame // number, which is zero-based, with the frame count, which is one-based.) - MOZ_ASSERT(aFrameNum + 1 == mFrameCount); + MOZ_ASSERT(aAnimParams, aAnimParams->mFrameNum + 1 == mFrameCount); // If we're past the first frame, PostIsAnimated() should've been called. MOZ_ASSERT_IF(mFrameCount > 1, HasAnimation()); @@ -309,18 +309,19 @@ Decoder::AllocateFrame(uint32_t aFrameNum, } RawAccessFrameRef -Decoder::AllocateFrameInternal(uint32_t aFrameNum, - const gfx::IntSize& aOutputSize, +Decoder::AllocateFrameInternal(const gfx::IntSize& aOutputSize, const gfx::IntRect& aFrameRect, SurfaceFormat aFormat, uint8_t aPaletteDepth, + const Maybe& aAnimParams, imgFrame* aPreviousFrame) { if (HasError()) { return RawAccessFrameRef(); } - if (aFrameNum != mFrameCount) { + uint32_t frameNum = aAnimParams ? aAnimParams->mFrameNum : 0; + if (frameNum != mFrameCount) { MOZ_ASSERT_UNREACHABLE("Allocating frames out of order"); return RawAccessFrameRef(); } @@ -334,7 +335,8 @@ Decoder::AllocateFrameInternal(uint32_t aFrameNum, NotNull> frame = WrapNotNull(new imgFrame()); bool nonPremult = bool(mSurfaceFlags & SurfaceFlags::NO_PREMULTIPLY_ALPHA); if (NS_FAILED(frame->InitForDecoder(aOutputSize, aFrameRect, aFormat, - aPaletteDepth, nonPremult))) { + aPaletteDepth, nonPremult, + aAnimParams))) { NS_WARNING("imgFrame::Init should succeed"); return RawAccessFrameRef(); } @@ -345,22 +347,22 @@ Decoder::AllocateFrameInternal(uint32_t aFrameNum, return RawAccessFrameRef(); } - if (aFrameNum == 1) { + if (frameNum == 1) { MOZ_ASSERT(aPreviousFrame, "Must provide a previous frame when animated"); aPreviousFrame->SetRawAccessOnly(); // If we dispose of the first frame by clearing it, then the first frame's // refresh area is all of itself. 
// RESTORE_PREVIOUS is invalid (assumed to be DISPOSE_CLEAR). - AnimationData previousFrameData = aPreviousFrame->GetAnimationData(); - if (previousFrameData.mDisposalMethod == DisposalMethod::CLEAR || - previousFrameData.mDisposalMethod == DisposalMethod::CLEAR_ALL || - previousFrameData.mDisposalMethod == DisposalMethod::RESTORE_PREVIOUS) { - mFirstFrameRefreshArea = previousFrameData.mRect; + DisposalMethod prevDisposal = aPreviousFrame->GetDisposalMethod(); + if (prevDisposal == DisposalMethod::CLEAR || + prevDisposal == DisposalMethod::CLEAR_ALL || + prevDisposal == DisposalMethod::RESTORE_PREVIOUS) { + mFirstFrameRefreshArea = aPreviousFrame->GetRect(); } } - if (aFrameNum > 0) { + if (frameNum > 0) { ref->SetRawAccessOnly(); // Some GIFs are huge but only have a small area that they animate. We only @@ -432,13 +434,7 @@ Decoder::PostIsAnimated(FrameTimeout aFirstFrameTimeout) } void -Decoder::PostFrameStop(Opacity aFrameOpacity - /* = Opacity::SOME_TRANSPARENCY */, - DisposalMethod aDisposalMethod - /* = DisposalMethod::KEEP */, - FrameTimeout aTimeout /* = FrameTimeout::Forever() */, - BlendMethod aBlendMethod /* = BlendMethod::OVER */, - const Maybe& aBlendRect /* = Nothing() */) +Decoder::PostFrameStop(Opacity aFrameOpacity) { // We should be mid-frame MOZ_ASSERT(!IsMetadataDecode(), "Stopping frame during metadata decode"); @@ -449,12 +445,11 @@ Decoder::PostFrameStop(Opacity aFrameOpacity mInFrame = false; mFinishedNewFrame = true; - mCurrentFrame->Finish(aFrameOpacity, aDisposalMethod, aTimeout, - aBlendMethod, aBlendRect); + mCurrentFrame->Finish(aFrameOpacity); mProgress |= FLAG_FRAME_COMPLETE; - mLoopLength += aTimeout; + mLoopLength += mCurrentFrame->GetTimeout(); // If we're not sending partial invalidations, then we send an invalidation // here when the first frame is complete. diff --git a/image/Decoder.h b/image/Decoder.h index 065a3c213..ed0c19687 100644 --- a/image/Decoder.h +++ b/image/Decoder.h @@ -11,6 +11,7 @@ #include "mozilla/Maybe.h" #include "mozilla/NotNull.h" #include "mozilla/RefPtr.h" +#include "AnimationParams.h" #include "DecodePool.h" #include "DecoderFlags.h" #include "Downscaler.h" @@ -28,6 +29,8 @@ namespace Telemetry { namespace image { +class imgFrame; + struct DecoderFinalStatus final { DecoderFinalStatus(bool aWasMetadataDecode, @@ -443,11 +446,7 @@ protected: // Specify whether this frame is opaque as an optimization. // For animated images, specify the disposal, blend method and timeout for // this frame. - void PostFrameStop(Opacity aFrameOpacity = Opacity::SOME_TRANSPARENCY, - DisposalMethod aDisposalMethod = DisposalMethod::KEEP, - FrameTimeout aTimeout = FrameTimeout::Forever(), - BlendMethod aBlendMethod = BlendMethod::OVER, - const Maybe& aBlendRect = Nothing()); + void PostFrameStop(Opacity aFrameOpacity = Opacity::SOME_TRANSPARENCY); /** * Called by the decoders when they have a region to invalidate. We may not @@ -476,16 +475,13 @@ protected: /** * Allocates a new frame, making it our current frame if successful. * - * The @aFrameNum parameter only exists as a sanity check; it's illegal to - * create a new frame anywhere but immediately after the existing frames. - * * If a non-paletted frame is desired, pass 0 for aPaletteDepth. 
*/ - nsresult AllocateFrame(uint32_t aFrameNum, - const gfx::IntSize& aOutputSize, + nsresult AllocateFrame(const gfx::IntSize& aOutputSize, const gfx::IntRect& aFrameRect, gfx::SurfaceFormat aFormat, - uint8_t aPaletteDepth = 0); + uint8_t aPaletteDepth = 0, + const Maybe& aAnimParams = Nothing()); private: /// Report that an error was encountered while decoding. @@ -509,11 +505,11 @@ private: return mInFrame ? mFrameCount - 1 : mFrameCount; } - RawAccessFrameRef AllocateFrameInternal(uint32_t aFrameNum, - const gfx::IntSize& aOutputSize, + RawAccessFrameRef AllocateFrameInternal(const gfx::IntSize& aOutputSize, const gfx::IntRect& aFrameRect, gfx::SurfaceFormat aFormat, uint8_t aPaletteDepth, + const Maybe& aAnimParams, imgFrame* aPreviousFrame); protected: diff --git a/image/DownscalingFilter.h b/image/DownscalingFilter.h index 1485b85c2..936abdcd5 100644 --- a/image/DownscalingFilter.h +++ b/image/DownscalingFilter.h @@ -74,7 +74,7 @@ public: Maybe TakeInvalidRect() override { return Nothing(); } template - nsresult Configure(const DownscalingConfig& aConfig, Rest... aRest) + nsresult Configure(const DownscalingConfig& aConfig, const Rest&... aRest) { return NS_ERROR_FAILURE; } @@ -115,7 +115,7 @@ public: } template - nsresult Configure(const DownscalingConfig& aConfig, Rest... aRest) + nsresult Configure(const DownscalingConfig& aConfig, const Rest&... aRest) { nsresult rv = mNext.Configure(aRest...); if (NS_FAILED(rv)) { diff --git a/image/SurfaceFilters.h b/image/SurfaceFilters.h index 1885661b9..70c8d4087 100644 --- a/image/SurfaceFilters.h +++ b/image/SurfaceFilters.h @@ -70,7 +70,7 @@ public: { } template - nsresult Configure(const DeinterlacingConfig& aConfig, Rest... aRest) + nsresult Configure(const DeinterlacingConfig& aConfig, const Rest&... aRest) { nsresult rv = mNext.Configure(aRest...); if (NS_FAILED(rv)) { @@ -360,7 +360,7 @@ public: { } template - nsresult Configure(const RemoveFrameRectConfig& aConfig, Rest... aRest) + nsresult Configure(const RemoveFrameRectConfig& aConfig, const Rest&... aRest) { nsresult rv = mNext.Configure(aRest...); if (NS_FAILED(rv)) { @@ -590,7 +590,7 @@ public: { } template - nsresult Configure(const ADAM7InterpolatingConfig& aConfig, Rest... aRest) + nsresult Configure(const ADAM7InterpolatingConfig& aConfig, const Rest&... aRest) { nsresult rv = mNext.Configure(aRest...); if (NS_FAILED(rv)) { diff --git a/image/SurfacePipe.cpp b/image/SurfacePipe.cpp index 5c306144a..882315112 100644 --- a/image/SurfacePipe.cpp +++ b/image/SurfacePipe.cpp @@ -104,10 +104,11 @@ SurfaceSink::Configure(const SurfaceConfig& aConfig) // XXX(seth): Once every Decoder subclass uses SurfacePipe, we probably want // to allocate the frame directly here and get rid of Decoder::AllocateFrame // altogether. - nsresult rv = aConfig.mDecoder->AllocateFrame(aConfig.mFrameNum, - surfaceSize, + nsresult rv = aConfig.mDecoder->AllocateFrame(surfaceSize, frameRect, - aConfig.mFormat); + aConfig.mFormat, + /* aPaletteDepth */ 0, + aConfig.mAnimParams); if (NS_FAILED(rv)) { return rv; } @@ -154,11 +155,11 @@ PalettedSurfaceSink::Configure(const PalettedSurfaceConfig& aConfig) // XXX(seth): Once every Decoder subclass uses SurfacePipe, we probably want // to allocate the frame directly here and get rid of Decoder::AllocateFrame // altogether. 
- nsresult rv = aConfig.mDecoder->AllocateFrame(aConfig.mFrameNum, - aConfig.mOutputSize, + nsresult rv = aConfig.mDecoder->AllocateFrame(aConfig.mOutputSize, aConfig.mFrameRect, aConfig.mFormat, - aConfig.mPaletteDepth); + aConfig.mPaletteDepth, + aConfig.mAnimParams); if (NS_FAILED(rv)) { return rv; } diff --git a/image/SurfacePipe.h b/image/SurfacePipe.h index f046afa56..61c8d30df 100644 --- a/image/SurfacePipe.h +++ b/image/SurfacePipe.h @@ -34,6 +34,8 @@ #include "mozilla/Variant.h" #include "mozilla/gfx/2D.h" +#include "AnimationParams.h" + namespace mozilla { namespace image { @@ -722,10 +724,10 @@ struct SurfaceConfig { using Filter = SurfaceSink; Decoder* mDecoder; /// Which Decoder to use to allocate the surface. - uint32_t mFrameNum; /// Which frame of animation this surface is for. gfx::IntSize mOutputSize; /// The size of the surface. gfx::SurfaceFormat mFormat; /// The surface format (BGRA or BGRX). bool mFlipVertically; /// If true, write the rows from bottom to top. + Maybe mAnimParams; /// Given for animated images. }; /** @@ -750,12 +752,12 @@ struct PalettedSurfaceConfig { using Filter = PalettedSurfaceSink; Decoder* mDecoder; /// Which Decoder to use to allocate the surface. - uint32_t mFrameNum; /// Which frame of animation this surface is for. gfx::IntSize mOutputSize; /// The logical size of the surface. gfx::IntRect mFrameRect; /// The surface subrect which contains data. gfx::SurfaceFormat mFormat; /// The surface format (BGRA or BGRX). uint8_t mPaletteDepth; /// The palette depth of this surface. bool mFlipVertically; /// If true, write the rows from bottom to top. + Maybe mAnimParams; /// Given for animated images. }; /** diff --git a/image/SurfacePipeFactory.h b/image/SurfacePipeFactory.h index ff53fec5c..4571f2e13 100644 --- a/image/SurfacePipeFactory.h +++ b/image/SurfacePipeFactory.h @@ -70,8 +70,6 @@ public: * * @param aDecoder The decoder whose current frame the SurfacePipe will write * to. - * @param aFrameNum Which frame the SurfacePipe will write to. This will be 0 - * for non-animated images. * @param aInputSize The original size of the image. * @param aOutputSize The size the SurfacePipe should output. Must be the same * as @aInputSize or smaller. If smaller, the image will be @@ -79,6 +77,7 @@ public: * @param aFrameRect The portion of the image that actually contains data. * @param aFormat The surface format of the image; generally B8G8R8A8 or * B8G8R8X8. + * @param aAnimParams Extra parameters used by animated images. * @param aFlags Flags enabling or disabling various functionality for the * SurfacePipe; see the SurfacePipeFlags documentation for more * information. @@ -89,11 +88,11 @@ public: */ static Maybe CreateSurfacePipe(Decoder* aDecoder, - uint32_t aFrameNum, const nsIntSize& aInputSize, const nsIntSize& aOutputSize, const nsIntRect& aFrameRect, gfx::SurfaceFormat aFormat, + const Maybe& aAnimParams, SurfacePipeFlags aFlags) { const bool deinterlace = bool(aFlags & SurfacePipeFlags::DEINTERLACE); @@ -125,8 +124,8 @@ public: ADAM7InterpolatingConfig interpolatingConfig; RemoveFrameRectConfig removeFrameRectConfig { aFrameRect }; DownscalingConfig downscalingConfig { aInputSize, aFormat }; - SurfaceConfig surfaceConfig { aDecoder, aFrameNum, aOutputSize, - aFormat, flipVertically }; + SurfaceConfig surfaceConfig { aDecoder, aOutputSize, aFormat, + flipVertically, aAnimParams }; Maybe pipe; @@ -181,13 +180,12 @@ public: * * @param aDecoder The decoder whose current frame the SurfacePipe will write * to. 
- * @param aFrameNum Which frame the SurfacePipe will write to. This will be 0 - * for non-animated images. * @param aInputSize The original size of the image. * @param aFrameRect The portion of the image that actually contains data. * @param aFormat The surface format of the image; generally B8G8R8A8 or * B8G8R8X8. * @param aPaletteDepth The palette depth of the image. + * @param aAnimParams Extra parameters used by animated images. * @param aFlags Flags enabling or disabling various functionality for the * SurfacePipe; see the SurfacePipeFlags documentation for more * information. @@ -198,11 +196,11 @@ public: */ static Maybe CreatePalettedSurfacePipe(Decoder* aDecoder, - uint32_t aFrameNum, const nsIntSize& aInputSize, const nsIntRect& aFrameRect, gfx::SurfaceFormat aFormat, uint8_t aPaletteDepth, + const Maybe& aAnimParams, SurfacePipeFlags aFlags) { const bool deinterlace = bool(aFlags & SurfacePipeFlags::DEINTERLACE); @@ -211,9 +209,9 @@ public: // Construct configurations for the SurfaceFilters. DeinterlacingConfig deinterlacingConfig { progressiveDisplay }; - PalettedSurfaceConfig palettedSurfaceConfig { aDecoder, aFrameNum, aInputSize, - aFrameRect, aFormat, aPaletteDepth, - flipVertically }; + PalettedSurfaceConfig palettedSurfaceConfig { aDecoder, aInputSize, aFrameRect, + aFormat, aPaletteDepth, + flipVertically, aAnimParams }; Maybe pipe; @@ -229,7 +227,7 @@ public: private: template static Maybe - MakePipe(Configs... aConfigs) + MakePipe(const Configs&... aConfigs) { auto pipe = MakeUnique::Type>(); nsresult rv = pipe->Configure(aConfigs...); diff --git a/image/test/gtest/Common.h b/image/test/gtest/Common.h index 79bed9fc1..0c288cddc 100644 --- a/image/test/gtest/Common.h +++ b/image/test/gtest/Common.h @@ -245,7 +245,7 @@ already_AddRefed CreateTrivialDecoder(); * @param aConfigs The configuration for the pipeline. */ template -void WithFilterPipeline(Decoder* aDecoder, Func aFunc, Configs... aConfigs) +void WithFilterPipeline(Decoder* aDecoder, Func aFunc, const Configs&... aConfigs) { auto pipe = MakeUnique::Type>(); nsresult rv = pipe->Configure(aConfigs...); @@ -268,7 +268,7 @@ void WithFilterPipeline(Decoder* aDecoder, Func aFunc, Configs... aConfigs) * @param aConfigs The configuration for the pipeline. */ template -void AssertConfiguringPipelineFails(Decoder* aDecoder, Configs... aConfigs) +void AssertConfiguringPipelineFails(Decoder* aDecoder, const Configs&... aConfigs) { auto pipe = MakeUnique::Type>(); nsresult rv = pipe->Configure(aConfigs...); diff --git a/image/test/gtest/TestADAM7InterpolatingFilter.cpp b/image/test/gtest/TestADAM7InterpolatingFilter.cpp index d9dab4346..d11224251 100644 --- a/image/test/gtest/TestADAM7InterpolatingFilter.cpp +++ b/image/test/gtest/TestADAM7InterpolatingFilter.cpp @@ -33,7 +33,7 @@ WithADAM7InterpolatingFilter(const IntSize& aSize, Func aFunc) WithFilterPipeline(decoder, Forward(aFunc), ADAM7InterpolatingConfig { }, - SurfaceConfig { decoder, 0, aSize, + SurfaceConfig { decoder, aSize, SurfaceFormat::B8G8R8A8, false }); } @@ -45,7 +45,7 @@ AssertConfiguringADAM7InterpolatingFilterFails(const IntSize& aSize) AssertConfiguringPipelineFails(decoder, ADAM7InterpolatingConfig { }, - SurfaceConfig { decoder, 0, aSize, + SurfaceConfig { decoder, aSize, SurfaceFormat::B8G8R8A8, false }); } @@ -664,7 +664,7 @@ TEST(ImageADAM7InterpolatingFilter, ConfiguringPalettedADAM7InterpolatingFilterF // should fail. 
AssertConfiguringPipelineFails(decoder, ADAM7InterpolatingConfig { }, - PalettedSurfaceConfig { decoder, 0, IntSize(100, 100), + PalettedSurfaceConfig { decoder, IntSize(100, 100), IntRect(0, 0, 50, 50), SurfaceFormat::B8G8R8A8, 8, false }); diff --git a/image/test/gtest/TestDeinterlacingFilter.cpp b/image/test/gtest/TestDeinterlacingFilter.cpp index 30cad7993..82637bbf7 100644 --- a/image/test/gtest/TestDeinterlacingFilter.cpp +++ b/image/test/gtest/TestDeinterlacingFilter.cpp @@ -28,7 +28,7 @@ WithDeinterlacingFilter(const IntSize& aSize, WithFilterPipeline(decoder, Forward(aFunc), DeinterlacingConfig { aProgressiveDisplay }, - SurfaceConfig { decoder, 0, aSize, + SurfaceConfig { decoder, aSize, SurfaceFormat::B8G8R8A8, false }); } @@ -41,7 +41,7 @@ WithPalettedDeinterlacingFilter(const IntSize& aSize, WithFilterPipeline(decoder, Forward(aFunc), DeinterlacingConfig { /* mProgressiveDisplay = */ true }, - PalettedSurfaceConfig { decoder, 0, aSize, + PalettedSurfaceConfig { decoder, aSize, IntRect(0, 0, 100, 100), SurfaceFormat::B8G8R8A8, 8, false }); @@ -55,7 +55,7 @@ AssertConfiguringDeinterlacingFilterFails(const IntSize& aSize) AssertConfiguringPipelineFails(decoder, DeinterlacingConfig { /* mProgressiveDisplay = */ true}, - SurfaceConfig { decoder, 0, aSize, + SurfaceConfig { decoder, aSize, SurfaceFormat::B8G8R8A8, false }); } diff --git a/image/test/gtest/TestDownscalingFilter.cpp b/image/test/gtest/TestDownscalingFilter.cpp index 596becab0..d7aa0ead2 100644 --- a/image/test/gtest/TestDownscalingFilter.cpp +++ b/image/test/gtest/TestDownscalingFilter.cpp @@ -29,7 +29,7 @@ WithDownscalingFilter(const IntSize& aInputSize, WithFilterPipeline(decoder, Forward(aFunc), DownscalingConfig { aInputSize, SurfaceFormat::B8G8R8A8 }, - SurfaceConfig { decoder, 0, aOutputSize, + SurfaceConfig { decoder, aOutputSize, SurfaceFormat::B8G8R8A8, false }); } @@ -43,7 +43,7 @@ AssertConfiguringDownscalingFilterFails(const IntSize& aInputSize, AssertConfiguringPipelineFails(decoder, DownscalingConfig { aInputSize, SurfaceFormat::B8G8R8A8 }, - SurfaceConfig { decoder, 0, aOutputSize, + SurfaceConfig { decoder, aOutputSize, SurfaceFormat::B8G8R8A8, false }); } @@ -224,7 +224,7 @@ TEST(ImageDownscalingFilter, ConfiguringPalettedDownscaleFails) AssertConfiguringPipelineFails(decoder, DownscalingConfig { IntSize(100, 100), SurfaceFormat::B8G8R8A8 }, - PalettedSurfaceConfig { decoder, 0, IntSize(20, 20), + PalettedSurfaceConfig { decoder, IntSize(20, 20), IntRect(0, 0, 20, 20), SurfaceFormat::B8G8R8A8, 8, false }); diff --git a/image/test/gtest/TestDownscalingFilterNoSkia.cpp b/image/test/gtest/TestDownscalingFilterNoSkia.cpp index c62ca018d..80928a880 100644 --- a/image/test/gtest/TestDownscalingFilterNoSkia.cpp +++ b/image/test/gtest/TestDownscalingFilterNoSkia.cpp @@ -52,6 +52,6 @@ TEST(ImageDownscalingFilter, NoSkia) AssertConfiguringPipelineFails(decoder, DownscalingConfig { IntSize(100, 100), SurfaceFormat::B8G8R8A8 }, - SurfaceConfig { decoder, 0, IntSize(50, 50), + SurfaceConfig { decoder, IntSize(50, 50), SurfaceFormat::B8G8R8A8, false }); } diff --git a/image/test/gtest/TestRemoveFrameRectFilter.cpp b/image/test/gtest/TestRemoveFrameRectFilter.cpp index e1def590e..ad1f944fc 100644 --- a/image/test/gtest/TestRemoveFrameRectFilter.cpp +++ b/image/test/gtest/TestRemoveFrameRectFilter.cpp @@ -28,7 +28,7 @@ WithRemoveFrameRectFilter(const IntSize& aSize, WithFilterPipeline(decoder, Forward(aFunc), RemoveFrameRectConfig { aFrameRect }, - SurfaceConfig { decoder, 0, aSize, + SurfaceConfig { decoder, aSize, 
SurfaceFormat::B8G8R8A8, false }); } @@ -41,7 +41,7 @@ AssertConfiguringRemoveFrameRectFilterFails(const IntSize& aSize, AssertConfiguringPipelineFails(decoder, RemoveFrameRectConfig { aFrameRect }, - SurfaceConfig { decoder, 0, aSize, + SurfaceConfig { decoder, aSize, SurfaceFormat::B8G8R8A8, false }); } @@ -320,7 +320,7 @@ TEST(ImageRemoveFrameRectFilter, ConfiguringPalettedRemoveFrameRectFails) // should fail. AssertConfiguringPipelineFails(decoder, RemoveFrameRectConfig { IntRect(0, 0, 50, 50) }, - PalettedSurfaceConfig { decoder, 0, IntSize(100, 100), + PalettedSurfaceConfig { decoder, IntSize(100, 100), IntRect(0, 0, 50, 50), SurfaceFormat::B8G8R8A8, 8, false }); diff --git a/image/test/gtest/TestSurfacePipeIntegration.cpp b/image/test/gtest/TestSurfacePipeIntegration.cpp index 5e8c19fc2..27138a3ee 100644 --- a/image/test/gtest/TestSurfacePipeIntegration.cpp +++ b/image/test/gtest/TestSurfacePipeIntegration.cpp @@ -149,7 +149,7 @@ TEST_F(ImageSurfacePipeIntegration, SurfacePipe) auto sink = MakeUnique(); nsresult rv = - sink->Configure(SurfaceConfig { decoder, 0, IntSize(100, 100), + sink->Configure(SurfaceConfig { decoder, IntSize(100, 100), SurfaceFormat::B8G8R8A8, false }); ASSERT_TRUE(NS_SUCCEEDED(rv)); @@ -227,7 +227,7 @@ TEST_F(ImageSurfacePipeIntegration, PalettedSurfacePipe) auto sink = MakeUnique(); nsresult rv = - sink->Configure(PalettedSurfaceConfig { decoder, 0, IntSize(100, 100), + sink->Configure(PalettedSurfaceConfig { decoder, IntSize(100, 100), IntRect(0, 0, 100, 100), SurfaceFormat::B8G8R8A8, 8, false }); @@ -313,7 +313,7 @@ TEST_F(ImageSurfacePipeIntegration, DeinterlaceDownscaleWritePixels) DeinterlacingConfig { /* mProgressiveDisplay = */ true }, DownscalingConfig { IntSize(100, 100), SurfaceFormat::B8G8R8A8 }, - SurfaceConfig { decoder, 0, IntSize(25, 25), + SurfaceConfig { decoder, IntSize(25, 25), SurfaceFormat::B8G8R8A8, false }); } @@ -369,7 +369,7 @@ TEST_F(ImageSurfacePipeIntegration, RemoveFrameRectBottomRightDownscaleWritePixe RemoveFrameRectConfig { IntRect(50, 50, 100, 100) }, DownscalingConfig { IntSize(100, 100), SurfaceFormat::B8G8R8A8 }, - SurfaceConfig { decoder, 0, IntSize(20, 20), + SurfaceConfig { decoder, IntSize(20, 20), SurfaceFormat::B8G8R8A8, false }); } @@ -403,7 +403,7 @@ TEST_F(ImageSurfacePipeIntegration, RemoveFrameRectTopLeftDownscaleWritePixels) RemoveFrameRectConfig { IntRect(-50, -50, 100, 100) }, DownscalingConfig { IntSize(100, 100), SurfaceFormat::B8G8R8A8 }, - SurfaceConfig { decoder, 0, IntSize(20, 20), + SurfaceConfig { decoder, IntSize(20, 20), SurfaceFormat::B8G8R8A8, false }); } @@ -427,7 +427,7 @@ TEST_F(ImageSurfacePipeIntegration, DeinterlaceRemoveFrameRectWritePixels) WithFilterPipeline(decoder, test, DeinterlacingConfig { /* mProgressiveDisplay = */ true }, RemoveFrameRectConfig { IntRect(50, 50, 100, 100) }, - SurfaceConfig { decoder, 0, IntSize(100, 100), + SurfaceConfig { decoder, IntSize(100, 100), SurfaceFormat::B8G8R8A8, false }); } @@ -450,7 +450,7 @@ TEST_F(ImageSurfacePipeIntegration, DeinterlaceRemoveFrameRectDownscaleWritePixe RemoveFrameRectConfig { IntRect(50, 50, 100, 100) }, DownscalingConfig { IntSize(100, 100), SurfaceFormat::B8G8R8A8 }, - SurfaceConfig { decoder, 0, IntSize(20, 20), + SurfaceConfig { decoder, IntSize(20, 20), SurfaceFormat::B8G8R8A8, false }); } @@ -465,7 +465,7 @@ TEST_F(ImageSurfacePipeIntegration, ConfiguringPalettedRemoveFrameRectDownscaleF RemoveFrameRectConfig { IntRect(0, 0, 50, 50) }, DownscalingConfig { IntSize(100, 100), SurfaceFormat::B8G8R8A8 }, - PalettedSurfaceConfig 
{ decoder, 0, IntSize(100, 100), + PalettedSurfaceConfig { decoder, IntSize(100, 100), IntRect(0, 0, 50, 50), SurfaceFormat::B8G8R8A8, 8, false }); @@ -482,7 +482,7 @@ TEST_F(ImageSurfacePipeIntegration, ConfiguringPalettedDeinterlaceDownscaleFails DeinterlacingConfig { /* mProgressiveDisplay = */ true}, DownscalingConfig { IntSize(100, 100), SurfaceFormat::B8G8R8A8 }, - PalettedSurfaceConfig { decoder, 0, IntSize(100, 100), + PalettedSurfaceConfig { decoder, IntSize(100, 100), IntRect(0, 0, 20, 20), SurfaceFormat::B8G8R8A8, 8, false }); @@ -503,6 +503,6 @@ TEST_F(ImageSurfacePipeIntegration, ConfiguringHugeDeinterlacingBufferFails) DeinterlacingConfig { /* mProgressiveDisplay = */ true}, DownscalingConfig { IntSize(60000, 60000), SurfaceFormat::B8G8R8A8 }, - SurfaceConfig { decoder, 0, IntSize(600, 600), + SurfaceConfig { decoder, IntSize(600, 600), SurfaceFormat::B8G8R8A8, false }); } diff --git a/image/test/gtest/TestSurfaceSink.cpp b/image/test/gtest/TestSurfaceSink.cpp index ccf9be3ec..3a1c74d12 100644 --- a/image/test/gtest/TestSurfaceSink.cpp +++ b/image/test/gtest/TestSurfaceSink.cpp @@ -32,7 +32,7 @@ WithSurfaceSink(Func aFunc) const bool flipVertically = Orientation == Orient::FLIP_VERTICALLY; WithFilterPipeline(decoder, Forward(aFunc), - SurfaceConfig { decoder, 0, IntSize(100, 100), + SurfaceConfig { decoder, IntSize(100, 100), SurfaceFormat::B8G8R8A8, flipVertically }); } @@ -43,7 +43,7 @@ WithPalettedSurfaceSink(const IntRect& aFrameRect, Func aFunc) ASSERT_TRUE(decoder != nullptr); WithFilterPipeline(decoder, Forward(aFunc), - PalettedSurfaceConfig { decoder, 0, IntSize(100, 100), + PalettedSurfaceConfig { decoder, IntSize(100, 100), aFrameRect, SurfaceFormat::B8G8R8A8, 8, false }); } -- cgit v1.2.3 From 3417e581ce76662131e8089de19b05af20a7fe19 Mon Sep 17 00:00:00 2001 From: wolfbeast Date: Wed, 21 Nov 2018 13:34:01 +0100 Subject: Bug 1462355 - Part 1c. Make individual image decoders to use updated Decoder/SurfacePipe methods. Extend this change to nsWebPDecoder.cpp --- image/decoders/nsBMPDecoder.cpp | 3 +-- image/decoders/nsGIFDecoder2.cpp | 25 +++++++++++++++--------- image/decoders/nsIconDecoder.cpp | 3 ++- image/decoders/nsJPEGDecoder.cpp | 4 ++-- image/decoders/nsPNGDecoder.cpp | 41 ++++++++++++++++++++++------------------ image/decoders/nsWebPDecoder.cpp | 6 +++++- 6 files changed, 49 insertions(+), 33 deletions(-) diff --git a/image/decoders/nsBMPDecoder.cpp b/image/decoders/nsBMPDecoder.cpp index 1f0449e4e..42bb3486a 100644 --- a/image/decoders/nsBMPDecoder.cpp +++ b/image/decoders/nsBMPDecoder.cpp @@ -674,8 +674,7 @@ nsBMPDecoder::ReadBitfields(const char* aData, size_t aLength) } MOZ_ASSERT(!mImageData, "Already have a buffer allocated?"); - nsresult rv = AllocateFrame(/* aFrameNum = */ 0, OutputSize(), - FullOutputFrame(), + nsresult rv = AllocateFrame(OutputSize(), FullOutputFrame(), mMayHaveTransparency ? SurfaceFormat::B8G8R8A8 : SurfaceFormat::B8G8R8X8); if (NS_FAILED(rv)) { diff --git a/image/decoders/nsGIFDecoder2.cpp b/image/decoders/nsGIFDecoder2.cpp index 7955438e4..efa145144 100644 --- a/image/decoders/nsGIFDecoder2.cpp +++ b/image/decoders/nsGIFDecoder2.cpp @@ -187,6 +187,14 @@ nsGIFDecoder2::BeginImageFrame(const IntRect& aFrameRect, // Make sure there's no animation if we're downscaling. 
MOZ_ASSERT_IF(Size() != OutputSize(), !GetImageMetadata().HasAnimation()); + AnimationParams animParams { + aFrameRect, + FrameTimeout::FromRawMilliseconds(mGIFStruct.delay_time), + uint32_t(mGIFStruct.images_decoded), + BlendMethod::OVER, + DisposalMethod(mGIFStruct.disposal_method) + }; + SurfacePipeFlags pipeFlags = aIsInterlaced ? SurfacePipeFlags::DEINTERLACE : SurfacePipeFlags(); @@ -198,17 +206,18 @@ nsGIFDecoder2::BeginImageFrame(const IntRect& aFrameRect, // The first frame is always decoded into an RGB surface. pipe = - SurfacePipeFactory::CreateSurfacePipe(this, mGIFStruct.images_decoded, - Size(), OutputSize(), - aFrameRect, format, pipeFlags); + SurfacePipeFactory::CreateSurfacePipe(this, Size(), OutputSize(), + aFrameRect, format, + Some(animParams), pipeFlags); } else { // This is an animation frame (and not the first). To minimize the memory // usage of animations, the image data is stored in paletted form. MOZ_ASSERT(Size() == OutputSize()); pipe = - SurfacePipeFactory::CreatePalettedSurfacePipe(this, mGIFStruct.images_decoded, - Size(), aFrameRect, format, - aDepth, pipeFlags); + SurfacePipeFactory::CreatePalettedSurfacePipe(this, Size(), aFrameRect, + format, + aDepth, Some(animParams), + pipeFlags); } mCurrentFrameIndex = mGIFStruct.images_decoded; @@ -249,9 +258,7 @@ nsGIFDecoder2::EndImageFrame() mGIFStruct.images_decoded++; // Tell the superclass we finished a frame - PostFrameStop(opacity, - DisposalMethod(mGIFStruct.disposal_method), - FrameTimeout::FromRawMilliseconds(mGIFStruct.delay_time)); + PostFrameStop(opacity); // Reset the transparent pixel if (mOldColor) { diff --git a/image/decoders/nsIconDecoder.cpp b/image/decoders/nsIconDecoder.cpp index 9ca63f5ad..b186874c6 100644 --- a/image/decoders/nsIconDecoder.cpp +++ b/image/decoders/nsIconDecoder.cpp @@ -70,8 +70,9 @@ nsIconDecoder::ReadHeader(const char* aData) MOZ_ASSERT(!mImageData, "Already have a buffer allocated?"); Maybe pipe = - SurfacePipeFactory::CreateSurfacePipe(this, 0, Size(), OutputSize(), + SurfacePipeFactory::CreateSurfacePipe(this, Size(), OutputSize(), FullFrame(), SurfaceFormat::B8G8R8A8, + /* aAnimParams */ Nothing(), SurfacePipeFlags()); if (!pipe) { return Transition::TerminateFailure(); diff --git a/image/decoders/nsJPEGDecoder.cpp b/image/decoders/nsJPEGDecoder.cpp index e76ffcbaf..7dac18e27 100644 --- a/image/decoders/nsJPEGDecoder.cpp +++ b/image/decoders/nsJPEGDecoder.cpp @@ -388,8 +388,8 @@ nsJPEGDecoder::ReadJPEGData(const char* aData, size_t aLength) jpeg_has_multiple_scans(&mInfo); MOZ_ASSERT(!mImageData, "Already have a buffer allocated?"); - nsresult rv = AllocateFrame(/* aFrameNum = */ 0, OutputSize(), - FullOutputFrame(), SurfaceFormat::B8G8R8X8); + nsresult rv = AllocateFrame(OutputSize(), FullOutputFrame(), + SurfaceFormat::B8G8R8X8); if (NS_FAILED(rv)) { mState = JPEG_ERROR; MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug, diff --git a/image/decoders/nsPNGDecoder.cpp b/image/decoders/nsPNGDecoder.cpp index 9596ae7d6..1f19c41bc 100644 --- a/image/decoders/nsPNGDecoder.cpp +++ b/image/decoders/nsPNGDecoder.cpp @@ -208,6 +208,25 @@ nsPNGDecoder::CreateFrame(const FrameInfo& aFrameInfo) MOZ_ASSERT_IF(Size() != OutputSize(), transparency != TransparencyType::eFrameRect); + Maybe animParams; +#ifdef PNG_APNG_SUPPORTED + if (png_get_valid(mPNG, mInfo, PNG_INFO_acTL)) { + mAnimInfo = AnimFrameInfo(mPNG, mInfo); + + if (mAnimInfo.mDispose == DisposalMethod::CLEAR) { + // We may have to display the background under this image during + // animation playback, so we regard it as 
transparent. + PostHasTransparency(); + } + + animParams.emplace(AnimationParams { + aFrameInfo.mFrameRect, + FrameTimeout::FromRawMilliseconds(mAnimInfo.mTimeout), + mNumFrames, mAnimInfo.mBlend, mAnimInfo.mDispose + }); + } +#endif + // If this image is interlaced, we can display better quality intermediate // results to the user by post processing them with ADAM7InterpolatingFilter. SurfacePipeFlags pipeFlags = aFrameInfo.mIsInterlaced @@ -220,9 +239,9 @@ nsPNGDecoder::CreateFrame(const FrameInfo& aFrameInfo) } Maybe pipe = - SurfacePipeFactory::CreateSurfacePipe(this, mNumFrames, Size(), - OutputSize(), aFrameInfo.mFrameRect, - format, pipeFlags); + SurfacePipeFactory::CreateSurfacePipe(this, Size(), OutputSize(), + aFrameInfo.mFrameRect, format, + animParams, pipeFlags); if (!pipe) { mPipe = SurfacePipe(); @@ -239,18 +258,6 @@ nsPNGDecoder::CreateFrame(const FrameInfo& aFrameInfo) "image frame with %dx%d pixels for decoder %p", mFrameRect.width, mFrameRect.height, this)); -#ifdef PNG_APNG_SUPPORTED - if (png_get_valid(mPNG, mInfo, PNG_INFO_acTL)) { - mAnimInfo = AnimFrameInfo(mPNG, mInfo); - - if (mAnimInfo.mDispose == DisposalMethod::CLEAR) { - // We may have to display the background under this image during - // animation playback, so we regard it as transparent. - PostHasTransparency(); - } - } -#endif - return NS_OK; } @@ -269,9 +276,7 @@ nsPNGDecoder::EndImageFrame() opacity = Opacity::FULLY_OPAQUE; } - PostFrameStop(opacity, mAnimInfo.mDispose, - FrameTimeout::FromRawMilliseconds(mAnimInfo.mTimeout), - mAnimInfo.mBlend, Some(mFrameRect)); + PostFrameStop(opacity); } nsresult diff --git a/image/decoders/nsWebPDecoder.cpp b/image/decoders/nsWebPDecoder.cpp index 4d6566486..4f3cc8b2a 100644 --- a/image/decoders/nsWebPDecoder.cpp +++ b/image/decoders/nsWebPDecoder.cpp @@ -234,8 +234,12 @@ nsWebPDecoder::CreateFrame(const nsIntRect& aFrameRect) SurfacePipeFlags pipeFlags = SurfacePipeFlags(); + AnimationParams animParams { + aFrameRect, mTimeout, mCurrentFrame, mBlend, mDisposal + }; + Maybe pipe = SurfacePipeFactory::CreateSurfacePipe(this, - mCurrentFrame, Size(), OutputSize(), aFrameRect, mFormat, pipeFlags); + Size(), OutputSize(), aFrameRect, mFormat, Some(animParams), pipeFlags); if (!pipe) { MOZ_LOG(sWebPLog, LogLevel::Error, ("[this=%p] nsWebPDecoder::CreateFrame -- no pipe\n", this)); -- cgit v1.2.3 From b9003c9c5d3c86f941dbf50de1cf8253072a9c85 Mon Sep 17 00:00:00 2001 From: wolfbeast Date: Wed, 21 Nov 2018 14:03:16 +0100 Subject: Fix pasta error --- image/AnimationParams.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/image/AnimationParams.h b/image/AnimationParams.h index 916ba7e40..dc403a4e8 100644 --- a/image/AnimationParams.h +++ b/image/AnimationParams.h @@ -43,3 +43,5 @@ struct AnimationParams } // namespace image } // namespace mozilla + +#endif // mozilla_image_AnimationParams_h -- cgit v1.2.3 From 5b821064f29962f80d6a683fcdf1a163c18fbf99 Mon Sep 17 00:00:00 2001 From: wolfbeast Date: Wed, 21 Nov 2018 14:08:00 +0100 Subject: Unrefactor mRawVeggies back to mVBuffMeat ;P (mRawSurface -> mVBuf) --- image/imgFrame.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/image/imgFrame.cpp b/image/imgFrame.cpp index af84d8cbb..c9f44181d 100644 --- a/image/imgFrame.cpp +++ b/image/imgFrame.cpp @@ -252,13 +252,13 @@ imgFrame::InitForDecoder(const nsIntSize& aImageSize, } else { MOZ_ASSERT(!mImageSurface, "Called imgFrame::InitForDecoder() twice?"); - mRawSurface = AllocateBufferForImage(mFrameRect.Size(), mFormat); - if (!mRawSurface) { + mVBuf = 
AllocateBufferForImage(mFrameRect.Size(), mFormat); + if (!mVBuf) { mAborted = true; return NS_ERROR_OUT_OF_MEMORY; } - mImageSurface = CreateLockedSurface(mRawSurface, mFrameRect.Size(), mFormat); + mImageSurface = CreateLockedSurface(mVBuf, mFrameRect.Size(), mFormat); if (!mImageSurface) { NS_WARNING("Failed to create ImageSurface"); @@ -266,7 +266,7 @@ imgFrame::InitForDecoder(const nsIntSize& aImageSize, return NS_ERROR_OUT_OF_MEMORY; } - if (!ClearSurface(mRawSurface, mFrameRect.Size(), mFormat)) { + if (!ClearSurface(mVBuf, mFrameRect.Size(), mFormat)) { NS_WARNING("Could not clear allocated buffer"); mAborted = true; return NS_ERROR_OUT_OF_MEMORY; -- cgit v1.2.3 From a51993521f26a0175c6b474f6cad30eb8f8a0f86 Mon Sep 17 00:00:00 2001 From: wolfbeast Date: Wed, 21 Nov 2018 14:08:47 +0100 Subject: Split out FrameTimeout into its own header file for re-use. --- image/FrameTimeout.h | 119 +++++++++++++++++++++++++++++++++++++++++++++++++++ image/imgFrame.h | 100 ------------------------------------------- 2 files changed, 119 insertions(+), 100 deletions(-) create mode 100644 image/FrameTimeout.h diff --git a/image/FrameTimeout.h b/image/FrameTimeout.h new file mode 100644 index 000000000..4070bba65 --- /dev/null +++ b/image/FrameTimeout.h @@ -0,0 +1,119 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef mozilla_image_FrameTimeout_h +#define mozilla_image_FrameTimeout_h + +#include +#include "mozilla/Assertions.h" + +namespace mozilla { +namespace image { + +/** + * FrameTimeout wraps a frame timeout value (measured in milliseconds) after + * first normalizing it. This normalization is necessary because some tools + * generate incorrect frame timeout values which we nevertheless have to + * support. For this reason, code that deals with frame timeouts should always + * use a FrameTimeout value rather than the raw value from the image header. + */ +struct FrameTimeout +{ + /** + * @return a FrameTimeout of zero. This should be used only for math + * involving FrameTimeout values. You can't obtain a zero FrameTimeout from + * FromRawMilliseconds(). + */ + static FrameTimeout Zero() { return FrameTimeout(0); } + + /// @return an infinite FrameTimeout. + static FrameTimeout Forever() { return FrameTimeout(-1); } + + /// @return a FrameTimeout obtained by normalizing a raw timeout value. + static FrameTimeout FromRawMilliseconds(int32_t aRawMilliseconds) + { + // Normalize all infinite timeouts to the same value. + if (aRawMilliseconds < 0) { + return FrameTimeout::Forever(); + } + + // Very small timeout values are problematic for two reasons: we don't want + // to burn energy redrawing animated images extremely fast, and broken tools + // generate these values when they actually want a "default" value, so such + // images won't play back right without normalization. For some context, + // see bug 890743, bug 125137, bug 139677, and bug 207059. The historical + // behavior of IE and Opera was: + // IE 6/Win: + // 10 - 50ms is normalized to 100ms. + // >50ms is used unnormalized. + // Opera 7 final/Win: + // 10ms is normalized to 100ms. + // >10ms is used unnormalized. + if (aRawMilliseconds >= 0 && aRawMilliseconds <= 10 ) { + return FrameTimeout(100); + } + + // The provided timeout value is OK as-is. 
+ return FrameTimeout(aRawMilliseconds); + } + + bool operator==(const FrameTimeout& aOther) const + { + return mTimeout == aOther.mTimeout; + } + + bool operator!=(const FrameTimeout& aOther) const { return !(*this == aOther); } + + FrameTimeout operator+(const FrameTimeout& aOther) + { + if (*this == Forever() || aOther == Forever()) { + return Forever(); + } + + return FrameTimeout(mTimeout + aOther.mTimeout); + } + + FrameTimeout& operator+=(const FrameTimeout& aOther) + { + *this = *this + aOther; + return *this; + } + + /** + * @return this FrameTimeout's value in milliseconds. Illegal to call on a + * an infinite FrameTimeout value. + */ + uint32_t AsMilliseconds() const + { + if (*this == Forever()) { + MOZ_ASSERT_UNREACHABLE("Calling AsMilliseconds() on an infinite FrameTimeout"); + return 100; // Fail to something sane. + } + + return uint32_t(mTimeout); + } + + /** + * @return this FrameTimeout value encoded so that non-negative values + * represent a timeout in milliseconds, and -1 represents an infinite + * timeout. + * + * XXX(seth): This is a backwards compatibility hack that should be removed. + */ + int32_t AsEncodedValueDeprecated() const { return mTimeout; } + +private: + explicit FrameTimeout(int32_t aTimeout) + : mTimeout(aTimeout) + { } + + int32_t mTimeout; +}; + +} // namespace image +} // namespace mozilla + +#endif // mozilla_image_FrameTimeout_h diff --git a/image/imgFrame.h b/image/imgFrame.h index 32aabacae..928f6ad86 100644 --- a/image/imgFrame.h +++ b/image/imgFrame.h @@ -29,106 +29,6 @@ enum class Opacity : uint8_t { SOME_TRANSPARENCY }; -/** - * FrameTimeout wraps a frame timeout value (measured in milliseconds) after - * first normalizing it. This normalization is necessary because some tools - * generate incorrect frame timeout values which we nevertheless have to - * support. For this reason, code that deals with frame timeouts should always - * use a FrameTimeout value rather than the raw value from the image header. - */ -struct FrameTimeout -{ - /** - * @return a FrameTimeout of zero. This should be used only for math - * involving FrameTimeout values. You can't obtain a zero FrameTimeout from - * FromRawMilliseconds(). - */ - static FrameTimeout Zero() { return FrameTimeout(0); } - - /// @return an infinite FrameTimeout. - static FrameTimeout Forever() { return FrameTimeout(-1); } - - /// @return a FrameTimeout obtained by normalizing a raw timeout value. - static FrameTimeout FromRawMilliseconds(int32_t aRawMilliseconds) - { - // Normalize all infinite timeouts to the same value. - if (aRawMilliseconds < 0) { - return FrameTimeout::Forever(); - } - - // Very small timeout values are problematic for two reasons: we don't want - // to burn energy redrawing animated images extremely fast, and broken tools - // generate these values when they actually want a "default" value, so such - // images won't play back right without normalization. For some context, - // see bug 890743, bug 125137, bug 139677, and bug 207059. The historical - // behavior of IE and Opera was: - // IE 6/Win: - // 10 - 50ms is normalized to 100ms. - // >50ms is used unnormalized. - // Opera 7 final/Win: - // 10ms is normalized to 100ms. - // >10ms is used unnormalized. - if (aRawMilliseconds >= 0 && aRawMilliseconds <= 10 ) { - return FrameTimeout(100); - } - - // The provided timeout value is OK as-is. 
- return FrameTimeout(aRawMilliseconds); - } - - bool operator==(const FrameTimeout& aOther) const - { - return mTimeout == aOther.mTimeout; - } - - bool operator!=(const FrameTimeout& aOther) const { return !(*this == aOther); } - - FrameTimeout operator+(const FrameTimeout& aOther) - { - if (*this == Forever() || aOther == Forever()) { - return Forever(); - } - - return FrameTimeout(mTimeout + aOther.mTimeout); - } - - FrameTimeout& operator+=(const FrameTimeout& aOther) - { - *this = *this + aOther; - return *this; - } - - /** - * @return this FrameTimeout's value in milliseconds. Illegal to call on a - * an infinite FrameTimeout value. - */ - uint32_t AsMilliseconds() const - { - if (*this == Forever()) { - MOZ_ASSERT_UNREACHABLE("Calling AsMilliseconds() on an infinite FrameTimeout"); - return 100; // Fail to something sane. - } - - return uint32_t(mTimeout); - } - - /** - * @return this FrameTimeout value encoded so that non-negative values - * represent a timeout in milliseconds, and -1 represents an infinite - * timeout. - * - * XXX(seth): This is a backwards compatibility hack that should be removed. - */ - int32_t AsEncodedValueDeprecated() const { return mTimeout; } - -private: - explicit FrameTimeout(int32_t aTimeout) - : mTimeout(aTimeout) - { } - - int32_t mTimeout; -}; - /** * AnimationData contains all of the information necessary for using an imgFrame * as part of an animation. -- cgit v1.2.3 From 25f4c75d68629a26cbb5433975027cd691a18dd7 Mon Sep 17 00:00:00 2001 From: wolfbeast Date: Wed, 21 Nov 2018 14:22:29 +0100 Subject: Fix blank pixel color for truncated GIFs --- image/decoders/nsGIFDecoder2.cpp | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/image/decoders/nsGIFDecoder2.cpp b/image/decoders/nsGIFDecoder2.cpp index efa145144..6f2be1fa1 100644 --- a/image/decoders/nsGIFDecoder2.cpp +++ b/image/decoders/nsGIFDecoder2.cpp @@ -212,10 +212,16 @@ nsGIFDecoder2::BeginImageFrame(const IntRect& aFrameRect, } else { // This is an animation frame (and not the first). To minimize the memory // usage of animations, the image data is stored in paletted form. + // + // We should never use paletted surfaces with a draw target directly, so + // the only practical difference between B8G8R8A8 and B8G8R8X8 is the + // cleared pixel value if we get truncated. We want 0 in that case to + // ensure it is an acceptable value for the color map as was the case + // historically. MOZ_ASSERT(Size() == OutputSize()); pipe = SurfacePipeFactory::CreatePalettedSurfacePipe(this, Size(), aFrameRect, - format, + SurfaceFormat::B8G8R8A8, aDepth, Some(animParams), pipeFlags); } -- cgit v1.2.3
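
Editor's note: for readers following the API change across this series, the sketch below (not part of the patch) shows the call shape the commits converge on. Per-frame animation metadata now travels in an AnimationParams value passed to SurfacePipeFactory::CreateSurfacePipe() as Some(animParams) (still images pass Nothing()), the explicit frame-index argument is gone, and PostFrameStop() no longer takes disposal, blend, or timeout arguments. The wrapper function, its parameters, and the include paths are assumptions made for illustration; only the factory call itself mirrors the hunks in nsGIFDecoder2.cpp, nsPNGDecoder.cpp, and nsWebPDecoder.cpp above, and it assumes the usual mozilla::gfx using-declarations are in scope.

// Illustrative sketch only -- not part of the patch series above.
#include "AnimationParams.h"     // AnimationParams, BlendMethod, DisposalMethod (assumed path)
#include "FrameTimeout.h"        // FrameTimeout, the new header introduced in this series
#include "SurfacePipeFactory.h"  // SurfacePipeFactory, SurfacePipe, SurfacePipeFlags (assumed path)

namespace mozilla {
namespace image {

// Hypothetical helper showing how a decoder would build a pipe for one
// animation frame after this series; the real decoders do this inline.
static Maybe<SurfacePipe>
CreateAnimatedFramePipe(Decoder* aDecoder,
                        const nsIntSize& aSize,
                        const nsIntRect& aFrameRect,
                        uint32_t aFrameNum,
                        int32_t aRawTimeoutMs)
{
  // Raw header timeouts are normalized by FrameTimeout
  // (e.g. a 0-10 ms value becomes 100 ms).
  AnimationParams animParams {
    aFrameRect,
    FrameTimeout::FromRawMilliseconds(aRawTimeoutMs),
    aFrameNum,
    BlendMethod::OVER,
    DisposalMethod::KEEP
  };

  // The frame index rides along in AnimationParams instead of being a
  // separate argument; a still image would pass Nothing() here.
  return SurfacePipeFactory::CreateSurfacePipe(aDecoder, aSize, aSize,
                                               aFrameRect,
                                               SurfaceFormat::B8G8R8A8,
                                               Some(animParams),
                                               SurfacePipeFlags());
}

} // namespace image
} // namespace mozilla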