summaryrefslogtreecommitdiffstats
path: root/dom/media/platforms/apple
diff options
context:
space:
mode:
authorMatt A. Tobin <mattatobin@localhost.localdomain>2018-02-02 04:16:08 -0500
committerMatt A. Tobin <mattatobin@localhost.localdomain>2018-02-02 04:16:08 -0500
commit5f8de423f190bbb79a62f804151bc24824fa32d8 (patch)
tree10027f336435511475e392454359edea8e25895d /dom/media/platforms/apple
parent49ee0794b5d912db1f95dce6eb52d781dc210db5 (diff)
downloadUXP-5f8de423f190bbb79a62f804151bc24824fa32d8.tar
UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.tar.gz
UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.tar.lz
UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.tar.xz
UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.zip
Add m-esr52 at 52.6.0
Diffstat (limited to 'dom/media/platforms/apple')
-rw-r--r--dom/media/platforms/apple/AppleATDecoder.cpp722
-rw-r--r--dom/media/platforms/apple/AppleATDecoder.h78
-rw-r--r--dom/media/platforms/apple/AppleCMFunctions.h12
-rw-r--r--dom/media/platforms/apple/AppleCMLinker.cpp104
-rw-r--r--dom/media/platforms/apple/AppleCMLinker.h46
-rw-r--r--dom/media/platforms/apple/AppleDecoderModule.cpp113
-rw-r--r--dom/media/platforms/apple/AppleDecoderModule.h48
-rw-r--r--dom/media/platforms/apple/AppleUtils.h98
-rw-r--r--dom/media/platforms/apple/AppleVTDecoder.cpp674
-rw-r--r--dom/media/platforms/apple/AppleVTDecoder.h126
-rw-r--r--dom/media/platforms/apple/AppleVTFunctions.h14
-rw-r--r--dom/media/platforms/apple/AppleVTLinker.cpp104
-rw-r--r--dom/media/platforms/apple/AppleVTLinker.h46
-rw-r--r--dom/media/platforms/apple/VideoToolbox/VideoToolbox.h94
14 files changed, 2279 insertions, 0 deletions
diff --git a/dom/media/platforms/apple/AppleATDecoder.cpp b/dom/media/platforms/apple/AppleATDecoder.cpp
new file mode 100644
index 000000000..65794d82c
--- /dev/null
+++ b/dom/media/platforms/apple/AppleATDecoder.cpp
@@ -0,0 +1,722 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "AppleUtils.h"
+#include "MP4Decoder.h"
+#include "mp4_demuxer/Adts.h"
+#include "MediaInfo.h"
+#include "AppleATDecoder.h"
+#include "mozilla/Logging.h"
+#include "mozilla/SyncRunnable.h"
+#include "mozilla/UniquePtr.h"
+
+#define LOG(...) MOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, (__VA_ARGS__))
+#define FourCC2Str(n) ((char[5]){(char)(n >> 24), (char)(n >> 16), (char)(n >> 8), (char)(n), 0})
+
+namespace mozilla {
+
// Constructs an AudioToolbox-backed audio decoder.
// aTaskQueue is the queue on which all decoding work runs; aCallback
// receives decoded output, errors, and InputExhausted notifications.
AppleATDecoder::AppleATDecoder(const AudioInfo& aConfig,
                               TaskQueue* aTaskQueue,
                               MediaDataDecoderCallback* aCallback)
  : mConfig(aConfig)
  , mFileStreamError(false)
  , mTaskQueue(aTaskQueue)
  , mCallback(aCallback)
  , mConverter(nullptr)
  , mStream(nullptr)
  , mIsFlushing(false)
  , mParsedFramesForAACMagicCookie(0)
  , mErrored(false)
{
  MOZ_COUNT_CTOR(AppleATDecoder);
  LOG("Creating Apple AudioToolbox decoder");
  LOG("Audio Decoder configuration: %s %d Hz %d channels %d bits per channel",
      mConfig.mMimeType.get(),
      mConfig.mRate,
      mConfig.mChannels,
      mConfig.mBitDepth);

  // Map the MIME type to a CoreAudio format ID. A value of 0 marks an
  // unsupported type and causes Init() to reject.
  if (mConfig.mMimeType.EqualsLiteral("audio/mpeg")) {
    mFormatID = kAudioFormatMPEGLayer3;
  } else if (mConfig.mMimeType.EqualsLiteral("audio/mp4a-latm")) {
    mFormatID = kAudioFormatMPEG4AAC;
  } else {
    mFormatID = 0;
  }
}
+
// The AudioConverter must already have been disposed of via
// Shutdown()/ProcessShutdown() before the decoder is destroyed.
AppleATDecoder::~AppleATDecoder()
{
  MOZ_COUNT_DTOR(AppleATDecoder);
  MOZ_ASSERT(!mConverter);
}
+
+RefPtr<MediaDataDecoder::InitPromise>
+AppleATDecoder::Init()
+{
+ if (!mFormatID) {
+ NS_ERROR("Non recognised format");
+ return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ }
+
+ return InitPromise::CreateAndResolve(TrackType::kAudioTrack, __func__);
+}
+
// Accepts one compressed sample from the reader thread and queues the
// actual decode work onto mTaskQueue.
void
AppleATDecoder::Input(MediaRawData* aSample)
{
  MOZ_ASSERT(mCallback->OnReaderTaskQueue());
  LOG("mp4 input sample %p %lld us %lld pts%s %llu bytes audio",
      aSample,
      aSample->mDuration,
      aSample->mTime,
      aSample->mKeyframe ? " keyframe" : "",
      (unsigned long long)aSample->Size());

  // Queue a task to perform the actual decoding on a separate thread.
  // The runnable holds a strong reference to the sample.
  nsCOMPtr<nsIRunnable> runnable =
      NewRunnableMethod<RefPtr<MediaRawData>>(
        this,
        &AppleATDecoder::SubmitSample,
        RefPtr<MediaRawData>(aSample));
  mTaskQueue->Dispatch(runnable.forget());
}
+
+void
+AppleATDecoder::ProcessFlush()
+{
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+ mQueuedSamples.Clear();
+ if (mConverter) {
+ OSStatus rv = AudioConverterReset(mConverter);
+ if (rv) {
+ LOG("Error %d resetting AudioConverter", rv);
+ }
+ }
+ if (mErrored) {
+ mParsedFramesForAACMagicCookie = 0;
+ mMagicCookie.Clear();
+ ProcessShutdown();
+ mErrored = false;
+ }
+}
+
// Flushes pending work. mIsFlushing makes any in-flight SubmitSample task
// bail out early; the flag is cleared only after ProcessFlush has run to
// completion on the task queue (SyncRunnable blocks until then).
void
AppleATDecoder::Flush()
{
  MOZ_ASSERT(mCallback->OnReaderTaskQueue());
  LOG("Flushing AudioToolbox AAC decoder");
  mIsFlushing = true;
  nsCOMPtr<nsIRunnable> runnable =
    NewRunnableMethod(this, &AppleATDecoder::ProcessFlush);
  SyncRunnable::DispatchToThread(mTaskQueue, runnable);
  mIsFlushing = false;
}
+
// Drains the decoder: waits for all queued decode tasks to finish (so all
// pending output has been delivered), signals completion, then flushes.
void
AppleATDecoder::Drain()
{
  MOZ_ASSERT(mCallback->OnReaderTaskQueue());
  LOG("Draining AudioToolbox AAC decoder");
  mTaskQueue->AwaitIdle();
  mCallback->DrainComplete();
  Flush();
}
+
// Synchronously tears down the decoder on the task queue.
void
AppleATDecoder::Shutdown()
{
  MOZ_ASSERT(mCallback->OnReaderTaskQueue());
  nsCOMPtr<nsIRunnable> runnable =
    NewRunnableMethod(this, &AppleATDecoder::ProcessShutdown);
  SyncRunnable::DispatchToThread(mTaskQueue, runnable);
}
+
+void
+AppleATDecoder::ProcessShutdown()
+{
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+
+ if (mStream) {
+ OSStatus rv = AudioFileStreamClose(mStream);
+ if (rv) {
+ LOG("error %d disposing of AudioFileStream", rv);
+ return;
+ }
+ mStream = nullptr;
+ }
+
+ if (mConverter) {
+ LOG("Shutdown: Apple AudioToolbox AAC decoder");
+ OSStatus rv = AudioConverterDispose(mConverter);
+ if (rv) {
+ LOG("error %d disposing of AudioConverter", rv);
+ }
+ mConverter = nullptr;
+ }
+}
+
// State handed to _PassthroughInputDataCallback through the AudioConverter
// user-data pointer: a single compressed packet plus its description.
struct PassthroughUserData {
  UInt32 mChannels;                      // channel count of the input packet
  UInt32 mDataSize;                      // bytes remaining; zeroed once consumed
  const void* mData;                     // compressed packet bytes (not owned)
  AudioStreamPacketDescription mPacket;  // description returned to the converter
};

// Error value we pass through the decoder to signal that nothing
// has gone wrong during decoding and we're done processing the packet.
// (Multi-character literal used as a private OSStatus code.)
const uint32_t kNoMoreDataErr = 'MOAR';
+
+static OSStatus
+_PassthroughInputDataCallback(AudioConverterRef aAudioConverter,
+ UInt32* aNumDataPackets /* in/out */,
+ AudioBufferList* aData /* in/out */,
+ AudioStreamPacketDescription** aPacketDesc,
+ void* aUserData)
+{
+ PassthroughUserData* userData = (PassthroughUserData*)aUserData;
+ if (!userData->mDataSize) {
+ *aNumDataPackets = 0;
+ return kNoMoreDataErr;
+ }
+
+ if (aPacketDesc) {
+ userData->mPacket.mStartOffset = 0;
+ userData->mPacket.mVariableFramesInPacket = 0;
+ userData->mPacket.mDataByteSize = userData->mDataSize;
+ *aPacketDesc = &userData->mPacket;
+ }
+
+ aData->mBuffers[0].mNumberChannels = userData->mChannels;
+ aData->mBuffers[0].mDataByteSize = userData->mDataSize;
+ aData->mBuffers[0].mData = const_cast<void*>(userData->mData);
+
+ // No more data to provide following this run.
+ userData->mDataSize = 0;
+
+ return noErr;
+}
+
// Task-queue entry point for decoding one sample. Lazily creates the
// AudioConverter on first use; while setup still needs more frames
// (implicit HE-AAC detection), samples accumulate in mQueuedSamples and
// are decoded in order once the converter exists.
void
AppleATDecoder::SubmitSample(MediaRawData* aSample)
{
  MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());

  // A Flush() is in progress; drop the sample.
  if (mIsFlushing) {
    return;
  }

  MediaResult rv = NS_OK;
  if (!mConverter) {
    rv = SetupDecoder(aSample);
    // NS_ERROR_NOT_INITIALIZED means "need more frames before setup can
    // complete" — not a fatal error; the sample is queued below.
    if (rv != NS_OK && rv != NS_ERROR_NOT_INITIALIZED) {
      mCallback->Error(rv);
      return;
    }
  }

  mQueuedSamples.AppendElement(aSample);

  if (rv == NS_OK) {
    for (size_t i = 0; i < mQueuedSamples.Length(); i++) {
      rv = DecodeSample(mQueuedSamples[i]);
      if (NS_FAILED(rv)) {
        // Remember the failure so the next flush rebuilds the decoder.
        mErrored = true;
        mCallback->Error(rv);
        return;
      }
    }
    mQueuedSamples.Clear();
  }
  mCallback->InputExhausted();
}
+
// Decodes one compressed sample into PCM, converts the channel layout if
// needed, and delivers an AudioData to the callback. Returns NS_OK when
// the sample produced no output (not an error).
MediaResult
AppleATDecoder::DecodeSample(MediaRawData* aSample)
{
  MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());

  // Array containing the queued decoded audio frames, about to be output.
  nsTArray<AudioDataValue> outputData;
  UInt32 channels = mOutputFormat.mChannelsPerFrame;
  // Pick a multiple of the frame size close to a power of two
  // for efficient allocation.
  const uint32_t MAX_AUDIO_FRAMES = 128;
  const uint32_t maxDecodedSamples = MAX_AUDIO_FRAMES * channels;

  // Descriptions for _decompressed_ audio packets. ignored.
  auto packets = MakeUnique<AudioStreamPacketDescription[]>(MAX_AUDIO_FRAMES);

  // This API insists on having packets spoon-fed to it from a callback.
  // This structure exists only to pass our state.
  PassthroughUserData userData =
    { channels, (UInt32)aSample->Size(), aSample->Data() };

  // Decompressed audio buffer
  AlignedAudioBuffer decoded(maxDecodedSamples);
  if (!decoded) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  // Pull decoded PCM out of the converter in chunks of at most
  // MAX_AUDIO_FRAMES until the input callback reports end-of-data.
  do {
    AudioBufferList decBuffer;
    decBuffer.mNumberBuffers = 1;
    decBuffer.mBuffers[0].mNumberChannels = channels;
    decBuffer.mBuffers[0].mDataByteSize =
      maxDecodedSamples * sizeof(AudioDataValue);
    decBuffer.mBuffers[0].mData = decoded.get();

    // in: the max number of packets we can handle from the decoder.
    // out: the number of packets the decoder is actually returning.
    UInt32 numFrames = MAX_AUDIO_FRAMES;

    OSStatus rv = AudioConverterFillComplexBuffer(mConverter,
                                                  _PassthroughInputDataCallback,
                                                  &userData,
                                                  &numFrames /* in/out */,
                                                  &decBuffer,
                                                  packets.get());

    // kNoMoreDataErr is our own sentinel from the input callback, not a
    // real failure; anything else is a decode error.
    if (rv && rv != kNoMoreDataErr) {
      LOG("Error decoding audio sample: %d\n", rv);
      return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                         RESULT_DETAIL("Error decoding audio sample: %d @ %lld",
                                       rv, aSample->mTime));
    }

    if (numFrames) {
      outputData.AppendElements(decoded.get(), numFrames * channels);
    }

    if (rv == kNoMoreDataErr) {
      break;
    }
  } while (true);

  if (outputData.IsEmpty()) {
    return NS_OK;
  }

  size_t numFrames = outputData.Length() / channels;
  int rate = mOutputFormat.mSampleRate;
  media::TimeUnit duration = FramesToTimeUnit(numFrames, rate);
  if (!duration.IsValid()) {
    NS_WARNING("Invalid count of accumulated audio samples");
    return MediaResult(
      NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
      RESULT_DETAIL(
        "Invalid count of accumulated audio samples: num:%llu rate:%d",
        uint64_t(numFrames), rate));
  }

#ifdef LOG_SAMPLE_DECODE
  LOG("pushed audio at time %lfs; duration %lfs\n",
      (double)aSample->mTime / USECS_PER_S,
      duration.ToSeconds());
#endif

  AudioSampleBuffer data(outputData.Elements(), outputData.Length());
  if (!data.Data()) {
    return NS_ERROR_OUT_OF_MEMORY;
  }
  // Lazily build an AudioConverter (the Mozilla channel-remapping one, not
  // Apple's) once SetupChannelLayout() has identified the source layout.
  if (mChannelLayout && !mAudioConverter) {
    AudioConfig in(*mChannelLayout.get(), rate);
    AudioConfig out(channels, rate);
    if (!in.IsValid() || !out.IsValid()) {
      return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                         RESULT_DETAIL("Invalid audio config"));
    }
    mAudioConverter = MakeUnique<AudioConverter>(in, out);
  }
  if (mAudioConverter) {
    MOZ_ASSERT(mAudioConverter->CanWorkInPlace());
    data = mAudioConverter->Process(Move(data));
  }

  RefPtr<AudioData> audio = new AudioData(aSample->mOffset,
                                          aSample->mTime,
                                          duration.ToMicroseconds(),
                                          numFrames,
                                          data.Forget(),
                                          channels,
                                          rate);
  mCallback->Output(audio);
  return NS_OK;
}
+
+MediaResult
+AppleATDecoder::GetInputAudioDescription(AudioStreamBasicDescription& aDesc,
+ const nsTArray<uint8_t>& aExtraData)
+{
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+
+ // Request the properties from CoreAudio using the codec magic cookie
+ AudioFormatInfo formatInfo;
+ PodZero(&formatInfo.mASBD);
+ formatInfo.mASBD.mFormatID = mFormatID;
+ if (mFormatID == kAudioFormatMPEG4AAC) {
+ formatInfo.mASBD.mFormatFlags = mConfig.mExtendedProfile;
+ }
+ formatInfo.mMagicCookieSize = aExtraData.Length();
+ formatInfo.mMagicCookie = aExtraData.Elements();
+
+ UInt32 formatListSize;
+ // Attempt to retrieve the default format using
+ // kAudioFormatProperty_FormatInfo method.
+ // This method only retrieves the FramesPerPacket information required
+ // by the decoder, which depends on the codec type and profile.
+ aDesc.mFormatID = mFormatID;
+ aDesc.mChannelsPerFrame = mConfig.mChannels;
+ aDesc.mSampleRate = mConfig.mRate;
+ UInt32 inputFormatSize = sizeof(aDesc);
+ OSStatus rv = AudioFormatGetProperty(kAudioFormatProperty_FormatInfo,
+ 0,
+ NULL,
+ &inputFormatSize,
+ &aDesc);
+ if (NS_WARN_IF(rv)) {
+ return MediaResult(
+ NS_ERROR_FAILURE,
+ RESULT_DETAIL("Unable to get format info:%lld", int64_t(rv)));
+ }
+
+ // If any of the methods below fail, we will return the default format as
+ // created using kAudioFormatProperty_FormatInfo above.
+ rv = AudioFormatGetPropertyInfo(kAudioFormatProperty_FormatList,
+ sizeof(formatInfo),
+ &formatInfo,
+ &formatListSize);
+ if (rv || (formatListSize % sizeof(AudioFormatListItem))) {
+ return NS_OK;
+ }
+ size_t listCount = formatListSize / sizeof(AudioFormatListItem);
+ auto formatList = MakeUnique<AudioFormatListItem[]>(listCount);
+
+ rv = AudioFormatGetProperty(kAudioFormatProperty_FormatList,
+ sizeof(formatInfo),
+ &formatInfo,
+ &formatListSize,
+ formatList.get());
+ if (rv) {
+ return NS_OK;
+ }
+ LOG("found %u available audio stream(s)",
+ formatListSize / sizeof(AudioFormatListItem));
+ // Get the index number of the first playable format.
+ // This index number will be for the highest quality layer the platform
+ // is capable of playing.
+ UInt32 itemIndex;
+ UInt32 indexSize = sizeof(itemIndex);
+ rv = AudioFormatGetProperty(kAudioFormatProperty_FirstPlayableFormatFromList,
+ formatListSize,
+ formatList.get(),
+ &indexSize,
+ &itemIndex);
+ if (rv) {
+ return NS_OK;
+ }
+
+ aDesc = formatList[itemIndex].mASBD;
+
+ return NS_OK;
+}
+
+AudioConfig::Channel
+ConvertChannelLabel(AudioChannelLabel id)
+{
+ switch (id) {
+ case kAudioChannelLabel_Mono:
+ return AudioConfig::CHANNEL_MONO;
+ case kAudioChannelLabel_Left:
+ return AudioConfig::CHANNEL_LEFT;
+ case kAudioChannelLabel_Right:
+ return AudioConfig::CHANNEL_RIGHT;
+ case kAudioChannelLabel_Center:
+ return AudioConfig::CHANNEL_CENTER;
+ case kAudioChannelLabel_LFEScreen:
+ return AudioConfig::CHANNEL_LFE;
+ case kAudioChannelLabel_LeftSurround:
+ return AudioConfig::CHANNEL_LS;
+ case kAudioChannelLabel_RightSurround:
+ return AudioConfig::CHANNEL_RS;
+ case kAudioChannelLabel_CenterSurround:
+ return AudioConfig::CHANNEL_RCENTER;
+ case kAudioChannelLabel_RearSurroundLeft:
+ return AudioConfig::CHANNEL_RLS;
+ case kAudioChannelLabel_RearSurroundRight:
+ return AudioConfig::CHANNEL_RRS;
+ default:
+ return AudioConfig::CHANNEL_INVALID;
+ }
+}
+
+// Will set mChannelLayout if a channel layout could properly be identified
+// and is supported.
+nsresult
+AppleATDecoder::SetupChannelLayout()
+{
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+
+ // Determine the channel layout.
+ UInt32 propertySize;
+ UInt32 size;
+ OSStatus status =
+ AudioConverterGetPropertyInfo(mConverter,
+ kAudioConverterOutputChannelLayout,
+ &propertySize, NULL);
+ if (status || !propertySize) {
+ LOG("Couldn't get channel layout property (%s)", FourCC2Str(status));
+ return NS_ERROR_FAILURE;
+ }
+
+ auto data = MakeUnique<uint8_t[]>(propertySize);
+ size = propertySize;
+ status =
+ AudioConverterGetProperty(mConverter, kAudioConverterInputChannelLayout,
+ &size, data.get());
+ if (status || size != propertySize) {
+ LOG("Couldn't get channel layout property (%s)",
+ FourCC2Str(status));
+ return NS_ERROR_FAILURE;
+ }
+
+ AudioChannelLayout* layout =
+ reinterpret_cast<AudioChannelLayout*>(data.get());
+ AudioChannelLayoutTag tag = layout->mChannelLayoutTag;
+
+ // if tag is kAudioChannelLayoutTag_UseChannelDescriptions then the structure
+ // directly contains the the channel layout mapping.
+ // If tag is kAudioChannelLayoutTag_UseChannelBitmap then the layout will
+ // be defined via the bitmap and can be retrieved using
+ // kAudioFormatProperty_ChannelLayoutForBitmap property.
+ // Otherwise the tag itself describes the layout.
+ if (tag != kAudioChannelLayoutTag_UseChannelDescriptions) {
+ AudioFormatPropertyID property =
+ tag == kAudioChannelLayoutTag_UseChannelBitmap
+ ? kAudioFormatProperty_ChannelLayoutForBitmap
+ : kAudioFormatProperty_ChannelLayoutForTag;
+
+ if (property == kAudioFormatProperty_ChannelLayoutForBitmap) {
+ status =
+ AudioFormatGetPropertyInfo(property,
+ sizeof(UInt32), &layout->mChannelBitmap,
+ &propertySize);
+ } else {
+ status =
+ AudioFormatGetPropertyInfo(property,
+ sizeof(AudioChannelLayoutTag), &tag,
+ &propertySize);
+ }
+ if (status || !propertySize) {
+ LOG("Couldn't get channel layout property info (%s:%s)",
+ FourCC2Str(property), FourCC2Str(status));
+ return NS_ERROR_FAILURE;
+ }
+ data = MakeUnique<uint8_t[]>(propertySize);
+ layout = reinterpret_cast<AudioChannelLayout*>(data.get());
+ size = propertySize;
+
+ if (property == kAudioFormatProperty_ChannelLayoutForBitmap) {
+ status = AudioFormatGetProperty(property,
+ sizeof(UInt32), &layout->mChannelBitmap,
+ &size, layout);
+ } else {
+ status = AudioFormatGetProperty(property,
+ sizeof(AudioChannelLayoutTag), &tag,
+ &size, layout);
+ }
+ if (status || size != propertySize) {
+ LOG("Couldn't get channel layout property (%s:%s)",
+ FourCC2Str(property), FourCC2Str(status));
+ return NS_ERROR_FAILURE;
+ }
+ // We have retrieved the channel layout from the tag or bitmap.
+ // We can now directly use the channel descriptions.
+ layout->mChannelLayoutTag = kAudioChannelLayoutTag_UseChannelDescriptions;
+ }
+
+ if (layout->mNumberChannelDescriptions > MAX_AUDIO_CHANNELS ||
+ layout->mNumberChannelDescriptions != mOutputFormat.mChannelsPerFrame) {
+ LOG("Nonsensical channel layout or not matching the original channel number");
+ return NS_ERROR_FAILURE;
+ }
+
+ AudioConfig::Channel channels[MAX_AUDIO_CHANNELS];
+ for (uint32_t i = 0; i < layout->mNumberChannelDescriptions; i++) {
+ AudioChannelLabel id = layout->mChannelDescriptions[i].mChannelLabel;
+ AudioConfig::Channel channel = ConvertChannelLabel(id);
+ channels[i] = channel;
+ }
+ mChannelLayout =
+ MakeUnique<AudioConfig::ChannelLayout>(mOutputFormat.mChannelsPerFrame,
+ channels);
+ return NS_OK;
+}
+
// Creates the AudioConverter from the stream description. May return
// NS_ERROR_NOT_INITIALIZED to request more frames while probing AAC-LC
// streams for an implicit-SBR (HE-AAC) magic cookie.
MediaResult
AppleATDecoder::SetupDecoder(MediaRawData* aSample)
{
  MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
  // Number of frames we are willing to parse while hunting for the cookie
  // before giving up and using the default stream description.
  static const uint32_t MAX_FRAMES = 2;

  if (mFormatID == kAudioFormatMPEG4AAC &&
      mConfig.mExtendedProfile == 2 &&
      mParsedFramesForAACMagicCookie < MAX_FRAMES) {
    // Check for implicit SBR signalling if stream is AAC-LC
    // This will provide us with an updated magic cookie for use with
    // GetInputAudioDescription.
    if (NS_SUCCEEDED(GetImplicitAACMagicCookie(aSample)) &&
        !mMagicCookie.Length()) {
      // nothing found yet, will try again later
      mParsedFramesForAACMagicCookie++;
      return NS_ERROR_NOT_INITIALIZED;
    }
    // An error occurred, fallback to using default stream description
  }

  LOG("Initializing Apple AudioToolbox decoder");

  AudioStreamBasicDescription inputFormat;
  PodZero(&inputFormat);
  // Prefer the cookie recovered from the ADTS stream; otherwise fall back
  // to the container-provided extradata.
  MediaResult rv =
    GetInputAudioDescription(inputFormat,
                             mMagicCookie.Length() ?
                                 mMagicCookie : *mConfig.mExtraData);
  if (NS_FAILED(rv)) {
    return rv;
  }
  // Fill in the output format manually.
  PodZero(&mOutputFormat);
  mOutputFormat.mFormatID = kAudioFormatLinearPCM;
  mOutputFormat.mSampleRate = inputFormat.mSampleRate;
  mOutputFormat.mChannelsPerFrame = inputFormat.mChannelsPerFrame;
#if defined(MOZ_SAMPLE_TYPE_FLOAT32)
  mOutputFormat.mBitsPerChannel = 32;
  mOutputFormat.mFormatFlags =
    kLinearPCMFormatFlagIsFloat |
    0;
#elif defined(MOZ_SAMPLE_TYPE_S16)
  mOutputFormat.mBitsPerChannel = 16;
  mOutputFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | 0;
#else
# error Unknown audio sample type
#endif
  // Set up the decoder so it gives us one sample per frame
  mOutputFormat.mFramesPerPacket = 1;
  mOutputFormat.mBytesPerPacket = mOutputFormat.mBytesPerFrame
        = mOutputFormat.mChannelsPerFrame * mOutputFormat.mBitsPerChannel / 8;

  OSStatus status = AudioConverterNew(&inputFormat, &mOutputFormat, &mConverter);
  if (status) {
    LOG("Error %d constructing AudioConverter", status);
    mConverter = nullptr;
    return MediaResult(
      NS_ERROR_FAILURE,
      RESULT_DETAIL("Error constructing AudioConverter:%lld", int64_t(status)));
  }

  // Channel layout is best-effort; decoding proceeds without one.
  if (NS_FAILED(SetupChannelLayout())) {
    NS_WARNING("Couldn't retrieve channel layout, will use default layout");
  }

  return NS_OK;
}
+
// AudioFileStream property callback. We only care about the magic cookie:
// when it becomes available, copy it into the decoder's mMagicCookie.
// Failures set mFileStreamError so the caller can abandon the probe.
static void
_MetadataCallback(void* aAppleATDecoder,
                  AudioFileStreamID aStream,
                  AudioFileStreamPropertyID aProperty,
                  UInt32* aFlags)
{
  AppleATDecoder* decoder = static_cast<AppleATDecoder*>(aAppleATDecoder);
  LOG("MetadataCallback receiving: '%s'", FourCC2Str(aProperty));
  if (aProperty == kAudioFileStreamProperty_MagicCookieData) {
    UInt32 size;
    Boolean writeable;
    // Query the cookie size first, then fetch its bytes.
    OSStatus rv = AudioFileStreamGetPropertyInfo(aStream,
                                                 aProperty,
                                                 &size,
                                                 &writeable);
    if (rv) {
      LOG("Couldn't get property info for '%s' (%s)",
          FourCC2Str(aProperty), FourCC2Str(rv));
      decoder->mFileStreamError = true;
      return;
    }
    auto data = MakeUnique<uint8_t[]>(size);
    rv = AudioFileStreamGetProperty(aStream, aProperty,
                                    &size, data.get());
    if (rv) {
      LOG("Couldn't get property '%s' (%s)",
          FourCC2Str(aProperty), FourCC2Str(rv));
      decoder->mFileStreamError = true;
      return;
    }
    decoder->mMagicCookie.AppendElements(data.get(), size);
  }
}
+
// AudioFileStream packet callback. Deliberately empty: the stream is only
// parsed to trigger _MetadataCallback (magic-cookie extraction); decoded
// packet data is not used.
static void
_SampleCallback(void* aSBR,
                UInt32 aNumBytes,
                UInt32 aNumPackets,
                const void* aData,
                AudioStreamPacketDescription* aPackets)
{
}
+
+nsresult
+AppleATDecoder::GetImplicitAACMagicCookie(const MediaRawData* aSample)
+{
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+
+ // Prepend ADTS header to AAC audio.
+ RefPtr<MediaRawData> adtssample(aSample->Clone());
+ if (!adtssample) {
+ return NS_ERROR_OUT_OF_MEMORY;
+ }
+ int8_t frequency_index =
+ mp4_demuxer::Adts::GetFrequencyIndex(mConfig.mRate);
+
+ bool rv = mp4_demuxer::Adts::ConvertSample(mConfig.mChannels,
+ frequency_index,
+ mConfig.mProfile,
+ adtssample);
+ if (!rv) {
+ NS_WARNING("Failed to apply ADTS header");
+ return NS_ERROR_FAILURE;
+ }
+ if (!mStream) {
+ OSStatus rv = AudioFileStreamOpen(this,
+ _MetadataCallback,
+ _SampleCallback,
+ kAudioFileAAC_ADTSType,
+ &mStream);
+ if (rv) {
+ NS_WARNING("Couldn't open AudioFileStream");
+ return NS_ERROR_FAILURE;
+ }
+ }
+
+ OSStatus status = AudioFileStreamParseBytes(mStream,
+ adtssample->Size(),
+ adtssample->Data(),
+ 0 /* discontinuity */);
+ if (status) {
+ NS_WARNING("Couldn't parse sample");
+ }
+
+ if (status || mFileStreamError || mMagicCookie.Length()) {
+ // We have decoded a magic cookie or an error occurred as such
+ // we won't need the stream any longer.
+ AudioFileStreamClose(mStream);
+ mStream = nullptr;
+ }
+
+ return (mFileStreamError || status) ? NS_ERROR_FAILURE : NS_OK;
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/apple/AppleATDecoder.h b/dom/media/platforms/apple/AppleATDecoder.h
new file mode 100644
index 000000000..be232e07b
--- /dev/null
+++ b/dom/media/platforms/apple/AppleATDecoder.h
@@ -0,0 +1,78 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_AppleATDecoder_h
+#define mozilla_AppleATDecoder_h
+
+#include <AudioToolbox/AudioToolbox.h>
+#include "PlatformDecoderModule.h"
+#include "mozilla/ReentrantMonitor.h"
+#include "mozilla/Vector.h"
+#include "nsIThread.h"
+#include "AudioConverter.h"
+
+namespace mozilla {
+
+class TaskQueue;
+class MediaDataDecoderCallback;
+
// MediaDataDecoder backed by Apple's AudioToolbox AudioConverter API.
// Supports MP3 and AAC (including implicit HE-AAC/SBR detection). All
// decoding work runs on mTaskQueue; results flow through mCallback.
class AppleATDecoder : public MediaDataDecoder {
public:
  AppleATDecoder(const AudioInfo& aConfig,
                 TaskQueue* aTaskQueue,
                 MediaDataDecoderCallback* aCallback);
  virtual ~AppleATDecoder();

  RefPtr<InitPromise> Init() override;
  void Input(MediaRawData* aSample) override;
  void Flush() override;
  void Drain() override;
  void Shutdown() override;

  const char* GetDescriptionName() const override
  {
    return "apple CoreMedia decoder";
  }

  // Callbacks also need access to the config.
  const AudioInfo& mConfig;

  // Use to extract magic cookie for HE-AAC detection.
  nsTArray<uint8_t> mMagicCookie;
  // Will be set to true should an error occurred while attempting to retrieve
  // the magic cookie property.
  bool mFileStreamError;

private:
  const RefPtr<TaskQueue> mTaskQueue;
  MediaDataDecoderCallback* mCallback;
  AudioConverterRef mConverter;             // Apple decoder instance
  AudioStreamBasicDescription mOutputFormat; // linear-PCM output description
  UInt32 mFormatID;                          // CoreAudio codec ID; 0 = unsupported
  AudioFileStreamID mStream;                 // ADTS parser used for cookie probing
  // Samples held back while the decoder setup still needs more frames.
  nsTArray<RefPtr<MediaRawData>> mQueuedSamples;
  UniquePtr<AudioConfig::ChannelLayout> mChannelLayout;
  UniquePtr<AudioConverter> mAudioConverter; // channel remapper (Mozilla's)
  Atomic<bool> mIsFlushing;                  // set while Flush() is in progress

  void ProcessFlush();
  void ProcessShutdown();
  void SubmitSample(MediaRawData* aSample);
  MediaResult DecodeSample(MediaRawData* aSample);
  MediaResult GetInputAudioDescription(AudioStreamBasicDescription& aDesc,
                                       const nsTArray<uint8_t>& aExtraData);
  // Setup AudioConverter once all information required has been gathered.
  // Will return NS_ERROR_NOT_INITIALIZED if more data is required.
  MediaResult SetupDecoder(MediaRawData* aSample);
  nsresult GetImplicitAACMagicCookie(const MediaRawData* aSample);
  nsresult SetupChannelLayout();
  // Frames parsed so far while hunting for the HE-AAC magic cookie.
  uint32_t mParsedFramesForAACMagicCookie;
  // True after a decode error; ProcessFlush() then rebuilds the decoder.
  bool mErrored;
};
+
+} // namespace mozilla
+
+#endif // mozilla_AppleATDecoder_h
diff --git a/dom/media/platforms/apple/AppleCMFunctions.h b/dom/media/platforms/apple/AppleCMFunctions.h
new file mode 100644
index 000000000..2c0408490
--- /dev/null
+++ b/dom/media/platforms/apple/AppleCMFunctions.h
@@ -0,0 +1,12 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Construct references to each of the CoreMedia symbols we use.
+
// Each name is expanded with a "CM" prefix by the including linker files
// (see AppleCMLinker.cpp/.h), yielding e.g. CMVideoFormatDescriptionCreate.
LINK_FUNC(VideoFormatDescriptionCreate)
LINK_FUNC(BlockBufferCreateWithMemoryBlock)
LINK_FUNC(SampleBufferCreate)
LINK_FUNC(TimeMake)
diff --git a/dom/media/platforms/apple/AppleCMLinker.cpp b/dom/media/platforms/apple/AppleCMLinker.cpp
new file mode 100644
index 000000000..5227bf9e5
--- /dev/null
+++ b/dom/media/platforms/apple/AppleCMLinker.cpp
@@ -0,0 +1,104 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <dlfcn.h>
+
+#include "AppleCMLinker.h"
+#include "mozilla/ArrayUtils.h"
+#include "nsDebug.h"
+
+#define LOG(...) MOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, (__VA_ARGS__))
+
+namespace mozilla {
+
// Static link state: dlopen handle, resolved CF string constants, and the
// tri-state status guarding repeated Link() calls.
AppleCMLinker::LinkStatus
AppleCMLinker::sLinkStatus = LinkStatus_INIT;

void* AppleCMLinker::sLink = nullptr;
CFStringRef AppleCMLinker::skPropExtensionAtoms = nullptr;
CFStringRef AppleCMLinker::skPropFullRangeVideo = nullptr;

// Define storage for each dynamically-resolved CoreMedia function pointer
// (declared extern in AppleCMLinker.h via the same X-macro include).
#define LINK_FUNC(func) typeof(CM ## func) CM ## func;
#include "AppleCMFunctions.h"
#undef LINK_FUNC
+
// Loads the CoreMedia framework at runtime and resolves every symbol
// listed in AppleCMFunctions.h plus the CF string constants we need.
// Idempotent: the first outcome is cached in sLinkStatus. Returns true on
// success; on any failure everything is unlinked and false is returned.
/* static */ bool
AppleCMLinker::Link()
{
  if (sLinkStatus) {
    return sLinkStatus == LinkStatus_SUCCEEDED;
  }

  // Try the public framework path first, then the private one.
  const char* dlnames[] =
    { "/System/Library/Frameworks/CoreMedia.framework/CoreMedia",
      "/System/Library/PrivateFrameworks/CoreMedia.framework/CoreMedia" };
  bool dlfound = false;
  for (size_t i = 0; i < ArrayLength(dlnames); i++) {
    if ((sLink = dlopen(dlnames[i], RTLD_NOW | RTLD_LOCAL))) {
      dlfound = true;
      break;
    }
  }
  if (!dlfound) {
    NS_WARNING("Couldn't load CoreMedia framework");
    goto fail;
  }

// Resolve each CM-prefixed function; any missing symbol aborts the link.
#define LINK_FUNC2(func)                               \
  func = (typeof(func))dlsym(sLink, #func);            \
  if (!func) {                                         \
    NS_WARNING("Couldn't load CoreMedia function " #func ); \
    goto fail;                                         \
  }
#define LINK_FUNC(func) LINK_FUNC2(CM ## func)
#include "AppleCMFunctions.h"
#undef LINK_FUNC
#undef LINK_FUNC2

  skPropExtensionAtoms =
    GetIOConst("kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms");

  // Note: skPropFullRangeVideo is allowed to be null (not checked below).
  skPropFullRangeVideo =
    GetIOConst("kCMFormatDescriptionExtension_FullRangeVideo");

  if (!skPropExtensionAtoms) {
    goto fail;
  }

  LOG("Loaded CoreMedia framework.");
  sLinkStatus = LinkStatus_SUCCEEDED;
  return true;

fail:
  Unlink();

  sLinkStatus = LinkStatus_FAILED;
  return false;
}
+
+/* static */ void
+AppleCMLinker::Unlink()
+{
+ if (sLink) {
+ LOG("Unlinking CoreMedia framework.");
+ dlclose(sLink);
+ sLink = nullptr;
+ sLinkStatus = LinkStatus_INIT;
+ }
+}
+
+/* static */ CFStringRef
+AppleCMLinker::GetIOConst(const char* symbol)
+{
+ CFStringRef* address = (CFStringRef*)dlsym(sLink, symbol);
+ if (!address) {
+ return nullptr;
+ }
+
+ return *address;
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/apple/AppleCMLinker.h b/dom/media/platforms/apple/AppleCMLinker.h
new file mode 100644
index 000000000..74372b6af
--- /dev/null
+++ b/dom/media/platforms/apple/AppleCMLinker.h
@@ -0,0 +1,46 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef AppleCMLinker_h
+#define AppleCMLinker_h
+
+extern "C" {
+#pragma GCC visibility push(default)
+#include <CoreMedia/CoreMedia.h>
+#pragma GCC visibility pop
+}
+
+#include "nscore.h"
+
+namespace mozilla {
+
// Runtime (dlopen/dlsym) linker for the CoreMedia framework, so the build
// does not hard-depend on it. Link() resolves the functions listed in
// AppleCMFunctions.h and the two CF string constants below.
class AppleCMLinker
{
public:
  static bool Link();
  static void Unlink();
  // Resolved CF string constants; may be null before Link() succeeds.
  static CFStringRef skPropExtensionAtoms;
  static CFStringRef skPropFullRangeVideo;

private:
  // dlopen handle to the CoreMedia library.
  static void* sLink;

  static enum LinkStatus {
    LinkStatus_INIT = 0,
    LinkStatus_FAILED,
    LinkStatus_SUCCEEDED
  } sLinkStatus;

  // Looks up an exported CFStringRef constant; nullptr if absent.
  static CFStringRef GetIOConst(const char* symbol);
};

// Declare an extern function pointer (CM-prefixed) for every CoreMedia
// function we resolve at runtime; definitions live in AppleCMLinker.cpp.
#define LINK_FUNC(func) extern typeof(CM ## func)* CM ## func;
#include "AppleCMFunctions.h"
#undef LINK_FUNC
+
+} // namespace mozilla
+
+#endif // AppleCMLinker_h
diff --git a/dom/media/platforms/apple/AppleDecoderModule.cpp b/dom/media/platforms/apple/AppleDecoderModule.cpp
new file mode 100644
index 000000000..9976c86ab
--- /dev/null
+++ b/dom/media/platforms/apple/AppleDecoderModule.cpp
@@ -0,0 +1,113 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "AppleATDecoder.h"
+#include "AppleCMLinker.h"
+#include "AppleDecoderModule.h"
+#include "AppleVTDecoder.h"
+#include "AppleVTLinker.h"
+#include "MacIOSurfaceImage.h"
+#include "MediaPrefs.h"
+#include "mozilla/DebugOnly.h"
+#include "mozilla/Logging.h"
+#include "mozilla/gfx/gfxVars.h"
+
+namespace mozilla {
+
+bool AppleDecoderModule::sInitialized = false;
+bool AppleDecoderModule::sIsCoreMediaAvailable = false;
+bool AppleDecoderModule::sIsVTAvailable = false;
+bool AppleDecoderModule::sIsVTHWAvailable = false;
+bool AppleDecoderModule::sCanUseHardwareVideoDecoder = true;
+
+AppleDecoderModule::AppleDecoderModule()
+{
+  // Nothing to do: all module state is static and set up in Init().
+}
+
+AppleDecoderModule::~AppleDecoderModule()
+{
+}
+
+/* static */
+void
+// One-time, idempotent module initialization: loads the IOSurface helper
+// library and dlopen's CoreMedia and VideoToolbox, recording which
+// capabilities are available in the static flags.
+AppleDecoderModule::Init()
+{
+  if (sInitialized) {
+    return;
+  }
+
+  // Ensure IOSurface framework is loaded.
+  MacIOSurfaceLib::LoadLibrary();
+  const bool loaded = MacIOSurfaceLib::isInit();
+
+  // dlopen CoreMedia.framework if it's available.
+  sIsCoreMediaAvailable = AppleCMLinker::Link();
+  // dlopen VideoToolbox.framework if it's available.
+  // We must link both CM and VideoToolbox framework to allow for proper
+  // paired Link/Unlink calls
+  bool haveVideoToolbox = loaded && AppleVTLinker::Link();
+  sIsVTAvailable = sIsCoreMediaAvailable && haveVideoToolbox;
+
+  // The HW-acceleration key only resolves on OS versions that support it,
+  // so a non-null value doubles as the availability check.
+  sIsVTHWAvailable = AppleVTLinker::skPropEnableHWAccel != nullptr;
+
+  sCanUseHardwareVideoDecoder = loaded &&
+    gfx::gfxVars::CanUseHardwareVideoDecoding();
+
+  sInitialized = true;
+}
+
+// Startup succeeds only once Init() has run and VideoToolbox (plus its
+// CoreMedia dependency) was successfully linked.
+nsresult
+AppleDecoderModule::Startup()
+{
+  return (sInitialized && sIsVTAvailable) ? NS_OK : NS_ERROR_FAILURE;
+}
+
+already_AddRefed<MediaDataDecoder>
+// Create a VideoToolbox-backed H.264 decoder for the given parameters.
+// Callers are expected to have checked Startup()/SupportsMimeType() first.
+AppleDecoderModule::CreateVideoDecoder(const CreateDecoderParams& aParams)
+{
+  RefPtr<MediaDataDecoder> decoder =
+    new AppleVTDecoder(aParams.VideoConfig(),
+                       aParams.mTaskQueue,
+                       aParams.mCallback,
+                       aParams.mImageContainer);
+  return decoder.forget();
+}
+
+already_AddRefed<MediaDataDecoder>
+// Create an AudioToolbox-backed audio decoder (MP3/AAC) for the given
+// parameters.
+AppleDecoderModule::CreateAudioDecoder(const CreateDecoderParams& aParams)
+{
+  RefPtr<MediaDataDecoder> decoder =
+    new AppleATDecoder(aParams.AudioConfig(),
+                       aParams.mTaskQueue,
+                       aParams.mCallback);
+  return decoder.forget();
+}
+
+bool
+// Audio (MP3/AAC) only needs CoreMedia; H.264 video additionally needs
+// VideoToolbox (sIsVTAvailable implies sIsCoreMediaAvailable, see Init()).
+AppleDecoderModule::SupportsMimeType(const nsACString& aMimeType,
+                                     DecoderDoctorDiagnostics* aDiagnostics) const
+{
+  return (sIsCoreMediaAvailable &&
+          (aMimeType.EqualsLiteral("audio/mpeg") ||
+           aMimeType.EqualsLiteral("audio/mp4a-latm"))) ||
+         (sIsVTAvailable && (aMimeType.EqualsLiteral("video/mp4") ||
+                             aMimeType.EqualsLiteral("video/avc")));
+}
+
+// VideoToolbox consumes AVCC-framed H.264 samples, so video tracks must be
+// converted; audio samples are passed through unchanged.
+PlatformDecoderModule::ConversionRequired
+AppleDecoderModule::DecoderNeedsConversion(const TrackInfo& aConfig) const
+{
+  return aConfig.IsVideo() ? ConversionRequired::kNeedAVCC
+                           : ConversionRequired::kNeedNone;
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/apple/AppleDecoderModule.h b/dom/media/platforms/apple/AppleDecoderModule.h
new file mode 100644
index 000000000..43a828e63
--- /dev/null
+++ b/dom/media/platforms/apple/AppleDecoderModule.h
@@ -0,0 +1,48 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_AppleDecoderModule_h
+#define mozilla_AppleDecoderModule_h
+
+#include "PlatformDecoderModule.h"
+
+namespace mozilla {
+
+// PlatformDecoderModule backed by Apple's CoreMedia/VideoToolbox (video)
+// and AudioToolbox (audio) frameworks. Init() must run before any other
+// static state is consulted.
+class AppleDecoderModule : public PlatformDecoderModule {
+public:
+  AppleDecoderModule();
+  virtual ~AppleDecoderModule();
+
+  nsresult Startup() override;
+
+  // Decode thread.
+  already_AddRefed<MediaDataDecoder>
+  CreateVideoDecoder(const CreateDecoderParams& aParams) override;
+
+  // Decode thread.
+  already_AddRefed<MediaDataDecoder>
+  CreateAudioDecoder(const CreateDecoderParams& aParams) override;
+
+  bool SupportsMimeType(const nsACString& aMimeType,
+                        DecoderDoctorDiagnostics* aDiagnostics) const override;
+
+  ConversionRequired
+  DecoderNeedsConversion(const TrackInfo& aConfig) const override;
+
+  // One-time module setup; safe to call repeatedly.
+  static void Init();
+
+  // False when the GPU is blacklisted for hardware decoding.
+  static bool sCanUseHardwareVideoDecoder;
+
+private:
+  static bool sInitialized;
+  static bool sIsCoreMediaAvailable;
+  static bool sIsVTAvailable;
+  static bool sIsVTHWAvailable;
+};
+
+} // namespace mozilla
+
+#endif // mozilla_AppleDecoderModule_h
diff --git a/dom/media/platforms/apple/AppleUtils.h b/dom/media/platforms/apple/AppleUtils.h
new file mode 100644
index 000000000..9e30aff86
--- /dev/null
+++ b/dom/media/platforms/apple/AppleUtils.h
@@ -0,0 +1,98 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Utility functions to help with Apple API calls.
+
+#ifndef mozilla_AppleUtils_h
+#define mozilla_AppleUtils_h
+
+#include "mozilla/Attributes.h"
+
+namespace mozilla {
+
+// Wrapper class to call CFRelease on reference types
+// when they go out of scope.
+template <class T>
+// Scope guard: releases the wrapped CF reference on destruction.
+// Non-owning on copy (copy assignment is deliberately unimplemented).
+class AutoCFRelease {
+public:
+  MOZ_IMPLICIT AutoCFRelease(T aRef)
+    : mRef(aRef)
+  {
+  }
+  ~AutoCFRelease()
+  {
+    if (mRef) {
+      CFRelease(mRef);
+    }
+  }
+  // Return the wrapped ref so it can be used as an in parameter.
+  operator T()
+  {
+    return mRef;
+  }
+  // Return a pointer to the wrapped ref for use as an out parameter.
+  // NOTE(review): this does not release any previously-held ref, so it is
+  // intended for use on a freshly-constructed (nullptr) wrapper.
+  T* receive()
+  {
+    return &mRef;
+  }
+
+private:
+  // Copy operator isn't supported and is not implemented.
+  AutoCFRelease<T>& operator=(const AutoCFRelease<T>&);
+  T mRef;
+};
+
+// CFRefPtr: A CoreFoundation smart pointer.
+// Retains the wrapped reference on construction and copy, and releases it
+// on destruction, following Core Foundation ownership conventions.
+template <class T>
+class CFRefPtr {
+public:
+  explicit CFRefPtr(T aRef)
+    : mRef(aRef)
+  {
+    if (mRef) {
+      CFRetain(mRef);
+    }
+  }
+  // Copy constructor.
+  CFRefPtr(const CFRefPtr<T>& aCFRefPtr)
+    : mRef(aCFRefPtr.mRef)
+  {
+    if (mRef) {
+      CFRetain(mRef);
+    }
+  }
+  // Copy operator
+  CFRefPtr<T>& operator=(const CFRefPtr<T>& aCFRefPtr)
+  {
+    if (mRef == aCFRefPtr.mRef) {
+      // Same underlying ref (incl. self-assignment): nothing to do.
+      // BUG FIX: the original had a bare `return;` here, which is
+      // ill-formed in a function returning a reference and would fail to
+      // compile if this operator were ever instantiated.
+      return *this;
+    }
+    if (mRef) {
+      CFRelease(mRef);
+    }
+    mRef = aCFRefPtr.mRef;
+    if (mRef) {
+      CFRetain(mRef);
+    }
+    return *this;
+  }
+  ~CFRefPtr()
+  {
+    if (mRef) {
+      CFRelease(mRef);
+    }
+  }
+  // Return the wrapped ref so it can be used as an in parameter.
+  operator T()
+  {
+    return mRef;
+  }
+
+private:
+  T mRef;
+};
+
+} // namespace mozilla
+
+#endif // mozilla_AppleUtils_h
diff --git a/dom/media/platforms/apple/AppleVTDecoder.cpp b/dom/media/platforms/apple/AppleVTDecoder.cpp
new file mode 100644
index 000000000..81638870a
--- /dev/null
+++ b/dom/media/platforms/apple/AppleVTDecoder.cpp
@@ -0,0 +1,674 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <CoreFoundation/CFString.h>
+
+#include "AppleCMLinker.h"
+#include "AppleDecoderModule.h"
+#include "AppleUtils.h"
+#include "AppleVTDecoder.h"
+#include "AppleVTLinker.h"
+#include "MediaData.h"
+#include "mozilla/ArrayUtils.h"
+#include "mp4_demuxer/H264.h"
+#include "nsAutoPtr.h"
+#include "nsThreadUtils.h"
+#include "mozilla/Logging.h"
+#include "VideoUtils.h"
+#include "gfxPlatform.h"
+
+#define LOG(...) MOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, (__VA_ARGS__))
+
+namespace mozilla {
+
+// Construct the decoder. The actual VideoToolbox session is created later
+// in Init()/InitializeSession().
+AppleVTDecoder::AppleVTDecoder(const VideoInfo& aConfig,
+                               TaskQueue* aTaskQueue,
+                               MediaDataDecoderCallback* aCallback,
+                               layers::ImageContainer* aImageContainer)
+  : mExtraData(aConfig.mExtraData)
+  , mCallback(aCallback)
+  , mPictureWidth(aConfig.mImage.width)
+  , mPictureHeight(aConfig.mImage.height)
+  , mDisplayWidth(aConfig.mDisplay.width)
+  , mDisplayHeight(aConfig.mDisplay.height)
+  , mTaskQueue(aTaskQueue)
+  // Reorder-queue depth derived from the H.264 extradata (SPS).
+  , mMaxRefFrames(mp4_demuxer::H264::ComputeMaxRefFrames(aConfig.mExtraData))
+  , mImageContainer(aImageContainer)
+  , mIsShutDown(false)
+#ifdef MOZ_WIDGET_UIKIT
+  // No MacIOSurface on iOS; fall back to copying frames out in software.
+  , mUseSoftwareImages(true)
+#else
+  , mUseSoftwareImages(false)
+#endif
+  , mIsFlushing(false)
+  , mMonitor("AppleVideoDecoder")
+  , mFormat(nullptr)
+  , mSession(nullptr)
+  , mIsHardwareAccelerated(false)
+{
+  MOZ_COUNT_CTOR(AppleVTDecoder);
+  // TODO: Verify aConfig.mime_type.
+  LOG("Creating AppleVTDecoder for %dx%d h.264 video",
+      mDisplayWidth,
+      mDisplayHeight
+  );
+}
+
+// Session/format teardown happens in ProcessShutdown(), not here.
+AppleVTDecoder::~AppleVTDecoder()
+{
+  MOZ_COUNT_DTOR(AppleVTDecoder);
+}
+
+// Create the VideoToolbox decompression session. Resolves the promise on
+// success; rejects with a fatal media error otherwise.
+RefPtr<MediaDataDecoder::InitPromise>
+AppleVTDecoder::Init()
+{
+  if (NS_FAILED(InitializeSession())) {
+    return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+                                        __func__);
+  }
+  return InitPromise::CreateAndResolve(TrackType::kVideoTrack, __func__);
+}
+
+void
+// Accept one compressed sample from the reader thread and hand it to the
+// task queue for asynchronous decoding (ProcessDecode).
+AppleVTDecoder::Input(MediaRawData* aSample)
+{
+  MOZ_ASSERT(mCallback->OnReaderTaskQueue());
+
+  LOG("mp4 input sample %p pts %lld duration %lld us%s %d bytes",
+      aSample,
+      aSample->mTime,
+      aSample->mDuration,
+      aSample->mKeyframe ? " keyframe" : "",
+      aSample->Size());
+
+  mTaskQueue->Dispatch(NewRunnableMethod<RefPtr<MediaRawData>>(
+    this, &AppleVTDecoder::ProcessDecode, aSample));
+}
+
+void
+// Synchronously flush the decoder. mIsFlushing is raised first so that
+// queued ProcessDecode tasks and in-flight OutputFrame callbacks become
+// no-ops while the flush runs on the task queue.
+AppleVTDecoder::Flush()
+{
+  MOZ_ASSERT(mCallback->OnReaderTaskQueue());
+  mIsFlushing = true;
+  nsCOMPtr<nsIRunnable> runnable =
+    NewRunnableMethod(this, &AppleVTDecoder::ProcessFlush);
+  SyncRunnable::DispatchToThread(mTaskQueue, runnable);
+  mIsFlushing = false;
+
+  // Stale seek threshold must not outlive a flush.
+  mSeekTargetThreshold.reset();
+}
+
+void
+// Asynchronously drain: dispatch ProcessDrain to emit all pending frames
+// and then signal DrainComplete.
+AppleVTDecoder::Drain()
+{
+  MOZ_ASSERT(mCallback->OnReaderTaskQueue());
+  nsCOMPtr<nsIRunnable> runnable =
+    NewRunnableMethod(this, &AppleVTDecoder::ProcessDrain);
+  mTaskQueue->Dispatch(runnable.forget());
+}
+
+void
+// Tear down the decoder. Marks the instance shut down immediately (so
+// OutputFrame drops further frames), then releases the session/format on
+// the task queue if one exists, or inline otherwise.
+AppleVTDecoder::Shutdown()
+{
+  MOZ_DIAGNOSTIC_ASSERT(!mIsShutDown);
+  mIsShutDown = true;
+  if (mTaskQueue) {
+    nsCOMPtr<nsIRunnable> runnable =
+      NewRunnableMethod(this, &AppleVTDecoder::ProcessShutdown);
+    mTaskQueue->Dispatch(runnable.forget());
+  } else {
+    ProcessShutdown();
+  }
+}
+
+nsresult
+// Task-queue side of Input(): decode one sample unless a flush is in
+// progress, in which case the sample is silently dropped.
+AppleVTDecoder::ProcessDecode(MediaRawData* aSample)
+{
+  AssertOnTaskQueueThread();
+
+  if (mIsFlushing) {
+    return NS_OK;
+  }
+
+  auto rv = DoDecode(aSample);
+
+  return rv;
+}
+
+void
+// Release the VideoToolbox session (invalidating it first, as required
+// before CFRelease) and the format description.
+AppleVTDecoder::ProcessShutdown()
+{
+  if (mSession) {
+    LOG("%s: cleaning up session %p", __func__, mSession);
+    VTDecompressionSessionInvalidate(mSession);
+    CFRelease(mSession);
+    mSession = nullptr;
+  }
+  if (mFormat) {
+    LOG("%s: releasing format %p", __func__, mFormat);
+    CFRelease(mFormat);
+    mFormat = nullptr;
+  }
+}
+
+void
+// Task-queue side of Flush(): wait for the platform decoder to finish any
+// in-flight frames, then discard everything still queued for reordering.
+AppleVTDecoder::ProcessFlush()
+{
+  AssertOnTaskQueueThread();
+  nsresult rv = WaitForAsynchronousFrames();
+  if (NS_FAILED(rv)) {
+    LOG("AppleVTDecoder::Flush failed waiting for platform decoder "
+        "with error:%d.", rv);
+  }
+  ClearReorderedFrames();
+}
+
+void
+// Task-queue side of Drain(): wait for in-flight frames, emit everything
+// left in the reorder queue, then notify the callback.
+AppleVTDecoder::ProcessDrain()
+{
+  AssertOnTaskQueueThread();
+  nsresult rv = WaitForAsynchronousFrames();
+  if (NS_FAILED(rv)) {
+    LOG("AppleVTDecoder::Drain failed waiting for platform decoder "
+        "with error:%d.", rv);
+  }
+  DrainReorderedFrames();
+  mCallback->DrainComplete();
+}
+
+AppleVTDecoder::AppleFrameRef*
+// Heap-allocate the per-frame metadata passed through VideoToolbox as the
+// opaque sourceFrameRefCon. Ownership transfers to PlatformCallback, which
+// frees it via nsAutoPtr.
+AppleVTDecoder::CreateAppleFrameRef(const MediaRawData* aSample)
+{
+  MOZ_ASSERT(aSample);
+  return new AppleFrameRef(*aSample);
+}
+
+void
+// Output every frame still held for reordering, in composition order.
+AppleVTDecoder::DrainReorderedFrames()
+{
+  MonitorAutoLock mon(mMonitor);
+  while (!mReorderQueue.IsEmpty()) {
+    mCallback->Output(mReorderQueue.Pop().get());
+  }
+}
+
+void
+// Discard (without outputting) every frame held for reordering.
+AppleVTDecoder::ClearReorderedFrames()
+{
+  MonitorAutoLock mon(mMonitor);
+  while (!mReorderQueue.IsEmpty()) {
+    mReorderQueue.Pop();
+  }
+}
+
+void
+// Record the seek target; OutputFrame substitutes NullData for frames that
+// end before this time. Cleared again by Flush() or once a frame crosses
+// the threshold.
+AppleVTDecoder::SetSeekThreshold(const media::TimeUnit& aTime)
+{
+  LOG("SetSeekThreshold %lld", aTime.ToMicroseconds());
+  mSeekTargetThreshold = Some(aTime);
+}
+
+//
+// Implementation details.
+//
+
+// Callback passed to the VideoToolbox decoder for returning data.
+// This needs to be static because the API takes a C-style pair of
+// function and userdata pointers. This validates parameters and
+// forwards the decoded image back to an object method.
+static void
+PlatformCallback(void* decompressionOutputRefCon,
+                 void* sourceFrameRefCon,
+                 OSStatus status,
+                 VTDecodeInfoFlags flags,
+                 CVImageBufferRef image,
+                 CMTime presentationTimeStamp,
+                 CMTime presentationDuration)
+{
+  LOG("AppleVideoDecoder %s status %d flags %d", __func__, status, flags);
+
+  AppleVTDecoder* decoder =
+    static_cast<AppleVTDecoder*>(decompressionOutputRefCon);
+  // Take ownership of the frame ref allocated in DoDecode(); it is freed
+  // when this callback returns.
+  nsAutoPtr<AppleVTDecoder::AppleFrameRef> frameRef(
+    static_cast<AppleVTDecoder::AppleFrameRef*>(sourceFrameRefCon));
+
+  // Validate our arguments.
+  if (status != noErr || !image) {
+    NS_WARNING("VideoToolbox decoder returned no data");
+    // A null image tells OutputFrame to request more input.
+    image = nullptr;
+  } else if (flags & kVTDecodeInfo_FrameDropped) {
+    NS_WARNING(" ...frame tagged as dropped...");
+  } else {
+    MOZ_ASSERT(CFGetTypeID(image) == CVPixelBufferGetTypeID(),
+               "VideoToolbox returned an unexpected image type");
+  }
+  decoder->OutputFrame(image, *frameRef);
+}
+
+// Copy and return a decoded frame.
+// Called from PlatformCallback (the VideoToolbox decoder thread) with the
+// decoded image (or nullptr for a dropped frame) and the metadata captured
+// at decode-submission time. Wraps the image into a MediaData object and
+// pushes it through the PTS reorder queue.
+nsresult
+AppleVTDecoder::OutputFrame(CVPixelBufferRef aImage,
+                            AppleVTDecoder::AppleFrameRef aFrameRef)
+{
+  if (mIsShutDown || mIsFlushing) {
+    // We are in the process of flushing or shutting down; ignore frame.
+    return NS_OK;
+  }
+
+  LOG("mp4 output frame %lld dts %lld pts %lld duration %lld us%s",
+      aFrameRef.byte_offset,
+      aFrameRef.decode_timestamp.ToMicroseconds(),
+      aFrameRef.composition_timestamp.ToMicroseconds(),
+      aFrameRef.duration.ToMicroseconds(),
+      aFrameRef.is_sync_point ? " keyframe" : ""
+  );
+
+  if (!aImage) {
+    // Image was dropped by decoder or none return yet.
+    // We need more input to continue.
+    mCallback->InputExhausted();
+    return NS_OK;
+  }
+
+  // While seeking, frames that end before the seek target are replaced by
+  // NullData placeholders instead of being fully wrapped.
+  bool useNullSample = false;
+  if (mSeekTargetThreshold.isSome()) {
+    if ((aFrameRef.composition_timestamp + aFrameRef.duration) < mSeekTargetThreshold.ref()) {
+      useNullSample = true;
+    } else {
+      mSeekTargetThreshold.reset();
+    }
+  }
+
+  // Where our resulting image will end up.
+  RefPtr<MediaData> data;
+  // Bounds.
+  VideoInfo info;
+  info.mDisplay = nsIntSize(mDisplayWidth, mDisplayHeight);
+  gfx::IntRect visible = gfx::IntRect(0,
+                                      0,
+                                      mPictureWidth,
+                                      mPictureHeight);
+
+  if (useNullSample) {
+    data = new NullData(aFrameRef.byte_offset,
+                        aFrameRef.composition_timestamp.ToMicroseconds(),
+                        aFrameRef.duration.ToMicroseconds());
+  } else if (mUseSoftwareImages) {
+    size_t width = CVPixelBufferGetWidth(aImage);
+    size_t height = CVPixelBufferGetHeight(aImage);
+    DebugOnly<size_t> planes = CVPixelBufferGetPlaneCount(aImage);
+    MOZ_ASSERT(planes == 2, "Likely not NV12 format and it must be.");
+
+    VideoData::YCbCrBuffer buffer;
+
+    // Lock the returned image data.
+    CVReturn rv = CVPixelBufferLockBaseAddress(aImage, kCVPixelBufferLock_ReadOnly);
+    if (rv != kCVReturnSuccess) {
+      NS_ERROR("error locking pixel data");
+      mCallback->Error(
+        MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+                    RESULT_DETAIL("CVPixelBufferLockBaseAddress:%x", rv)));
+      return NS_ERROR_DOM_MEDIA_DECODE_ERR;
+    }
+    // Y plane.
+    buffer.mPlanes[0].mData =
+      static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 0));
+    buffer.mPlanes[0].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 0);
+    buffer.mPlanes[0].mWidth = width;
+    buffer.mPlanes[0].mHeight = height;
+    buffer.mPlanes[0].mOffset = 0;
+    buffer.mPlanes[0].mSkip = 0;
+    // Cb plane. NOTE: the buffer is NV12, so Cb and Cr are interleaved in
+    // plane 1; offset/skip select alternating samples from the same plane.
+    buffer.mPlanes[1].mData =
+      static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 1));
+    buffer.mPlanes[1].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 1);
+    buffer.mPlanes[1].mWidth = (width+1) / 2;
+    buffer.mPlanes[1].mHeight = (height+1) / 2;
+    buffer.mPlanes[1].mOffset = 0;
+    buffer.mPlanes[1].mSkip = 1;
+    // Cr plane.
+    buffer.mPlanes[2].mData =
+      static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 1));
+    buffer.mPlanes[2].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 1);
+    buffer.mPlanes[2].mWidth = (width+1) / 2;
+    buffer.mPlanes[2].mHeight = (height+1) / 2;
+    buffer.mPlanes[2].mOffset = 1;
+    buffer.mPlanes[2].mSkip = 1;
+
+    // Copy the image data into our own format.
+    data =
+      VideoData::CreateAndCopyData(info,
+                                   mImageContainer,
+                                   aFrameRef.byte_offset,
+                                   aFrameRef.composition_timestamp.ToMicroseconds(),
+                                   aFrameRef.duration.ToMicroseconds(),
+                                   buffer,
+                                   aFrameRef.is_sync_point,
+                                   aFrameRef.decode_timestamp.ToMicroseconds(),
+                                   visible);
+    // Unlock the returned image data.
+    CVPixelBufferUnlockBaseAddress(aImage, kCVPixelBufferLock_ReadOnly);
+  } else {
+#ifndef MOZ_WIDGET_UIKIT
+    // Zero-copy path: wrap the decoder's IOSurface-backed buffer directly.
+    IOSurfacePtr surface = MacIOSurfaceLib::CVPixelBufferGetIOSurface(aImage);
+    MOZ_ASSERT(surface, "Decoder didn't return an IOSurface backed buffer");
+
+    RefPtr<MacIOSurface> macSurface = new MacIOSurface(surface);
+
+    RefPtr<layers::Image> image = new MacIOSurfaceImage(macSurface);
+
+    data =
+      VideoData::CreateFromImage(info,
+                                 aFrameRef.byte_offset,
+                                 aFrameRef.composition_timestamp.ToMicroseconds(),
+                                 aFrameRef.duration.ToMicroseconds(),
+                                 image.forget(),
+                                 aFrameRef.is_sync_point,
+                                 aFrameRef.decode_timestamp.ToMicroseconds(),
+                                 visible);
+#else
+    MOZ_ASSERT_UNREACHABLE("No MacIOSurface on iOS");
+#endif
+  }
+
+  if (!data) {
+    NS_ERROR("Couldn't create VideoData for frame");
+    mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__));
+    return NS_ERROR_OUT_OF_MEMORY;
+  }
+
+  // Frames come out in DTS order but we need to output them
+  // in composition order.
+  MonitorAutoLock mon(mMonitor);
+  mReorderQueue.Push(data);
+  if (mReorderQueue.Length() > mMaxRefFrames) {
+    mCallback->Output(mReorderQueue.Pop().get());
+  }
+  mCallback->InputExhausted();
+  LOG("%llu decoded frames queued",
+      static_cast<unsigned long long>(mReorderQueue.Length()));
+
+  return NS_OK;
+}
+
+nsresult
+// Block until VideoToolbox has emitted every frame already submitted to
+// the session. Used by flush and drain.
+AppleVTDecoder::WaitForAsynchronousFrames()
+{
+  OSStatus rv = VTDecompressionSessionWaitForAsynchronousFrames(mSession);
+  if (rv != noErr) {
+    LOG("AppleVTDecoder: Error %d waiting for asynchronous frames", rv);
+    return NS_ERROR_FAILURE;
+  }
+  return NS_OK;
+}
+
+// Helper to fill in a timestamp structure.
+// Converts the sample's microsecond times into CMTime values with a
+// microseconds-per-second timescale.
+static CMSampleTimingInfo
+TimingInfoFromSample(MediaRawData* aSample)
+{
+  CMSampleTimingInfo timestamp;
+
+  timestamp.duration = CMTimeMake(aSample->mDuration, USECS_PER_S);
+  timestamp.presentationTimeStamp =
+    CMTimeMake(aSample->mTime, USECS_PER_S);
+  timestamp.decodeTimeStamp =
+    CMTimeMake(aSample->mTimecode, USECS_PER_S);
+
+  return timestamp;
+}
+
+// Submit one compressed sample to VideoToolbox for asynchronous decoding.
+// Decoded output arrives via PlatformCallback/OutputFrame. Reports errors
+// to mCallback as well as returning them.
+MediaResult
+AppleVTDecoder::DoDecode(MediaRawData* aSample)
+{
+  AssertOnTaskQueueThread();
+
+  // For some reason this gives me a double-free error with stagefright.
+  AutoCFRelease<CMBlockBufferRef> block = nullptr;
+  AutoCFRelease<CMSampleBufferRef> sample = nullptr;
+  // BUG FIX: initialize infoFlags. VTDecompressionSessionDecodeFrame can
+  // fail without writing its out-parameter, in which case the error check
+  // below would read an uninitialized value.
+  VTDecodeInfoFlags infoFlags = 0;
+  OSStatus rv;
+
+  // FIXME: This copies the sample data. I think we can provide
+  // a custom block source which reuses the aSample buffer.
+  // But note that there may be a problem keeping the samples
+  // alive over multiple frames.
+  rv = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault, // Struct allocator.
+                                          const_cast<uint8_t*>(aSample->Data()),
+                                          aSample->Size(),
+                                          kCFAllocatorNull, // Block allocator.
+                                          NULL, // Block source.
+                                          0, // Data offset.
+                                          aSample->Size(),
+                                          false,
+                                          block.receive());
+  if (rv != noErr) {
+    NS_ERROR("Couldn't create CMBlockBuffer");
+    mCallback->Error(
+      MediaResult(NS_ERROR_OUT_OF_MEMORY,
+                  RESULT_DETAIL("CMBlockBufferCreateWithMemoryBlock:%x", rv)));
+    return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
+  }
+  CMSampleTimingInfo timestamp = TimingInfoFromSample(aSample);
+  rv = CMSampleBufferCreate(kCFAllocatorDefault, block, true, 0, 0, mFormat, 1, 1, &timestamp, 0, NULL, sample.receive());
+  if (rv != noErr) {
+    NS_ERROR("Couldn't create CMSampleBuffer");
+    mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY,
+                                 RESULT_DETAIL("CMSampleBufferCreate:%x", rv)));
+    return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
+  }
+
+  VTDecodeFrameFlags decodeFlags =
+    kVTDecodeFrame_EnableAsynchronousDecompression;
+  // The AppleFrameRef is owned by PlatformCallback once submitted.
+  rv = VTDecompressionSessionDecodeFrame(mSession,
+                                         sample,
+                                         decodeFlags,
+                                         CreateAppleFrameRef(aSample),
+                                         &infoFlags);
+  if (rv != noErr && !(infoFlags & kVTDecodeInfo_FrameDropped)) {
+    LOG("AppleVTDecoder: Error %d VTDecompressionSessionDecodeFrame", rv);
+    NS_WARNING("Couldn't pass frame to decoder");
+    mCallback->Error(
+      MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+                  RESULT_DETAIL("VTDecompressionSessionDecodeFrame:%x", rv)));
+    return NS_ERROR_DOM_MEDIA_DECODE_ERR;
+  }
+
+  return NS_OK;
+}
+
+// Create the CMVideoFormatDescription and VTDecompressionSession used for
+// all subsequent decode calls, then query whether the session is actually
+// hardware accelerated. Returns NS_ERROR_FAILURE if either CoreMedia or
+// VideoToolbox object cannot be created.
+nsresult
+AppleVTDecoder::InitializeSession()
+{
+  OSStatus rv;
+
+  AutoCFRelease<CFDictionaryRef> extensions = CreateDecoderExtensions();
+
+  rv = CMVideoFormatDescriptionCreate(kCFAllocatorDefault,
+                                      kCMVideoCodecType_H264,
+                                      mPictureWidth,
+                                      mPictureHeight,
+                                      extensions,
+                                      &mFormat);
+  if (rv != noErr) {
+    NS_ERROR("Couldn't create format description!");
+    return NS_ERROR_FAILURE;
+  }
+
+  // Construct video decoder selection spec.
+  AutoCFRelease<CFDictionaryRef> spec = CreateDecoderSpecification();
+
+  // Construct output configuration.
+  AutoCFRelease<CFDictionaryRef> outputConfiguration =
+    CreateOutputConfiguration();
+
+  VTDecompressionOutputCallbackRecord cb = { PlatformCallback, this };
+  rv = VTDecompressionSessionCreate(kCFAllocatorDefault,
+                                    mFormat,
+                                    spec, // Video decoder selection.
+                                    outputConfiguration, // Output video format.
+                                    &cb,
+                                    &mSession);
+
+  if (rv != noErr) {
+    NS_ERROR("Couldn't create decompression session!");
+    return NS_ERROR_FAILURE;
+  }
+
+  if (AppleVTLinker::skPropUsingHWAccel) {
+    // LEAK FIX: VTSessionCopyProperty follows the Core Foundation Copy
+    // rule, so the returned CFBooleanRef must be released by the caller;
+    // wrap it in AutoCFRelease so it is released when it goes out of scope.
+    AutoCFRelease<CFBooleanRef> isUsingHW = nullptr;
+    rv = VTSessionCopyProperty(mSession,
+                               AppleVTLinker::skPropUsingHWAccel,
+                               kCFAllocatorDefault,
+                               isUsingHW.receive());
+    if (rv != noErr) {
+      LOG("AppleVTDecoder: system doesn't support hardware acceleration");
+    }
+    mIsHardwareAccelerated = rv == noErr && isUsingHW == kCFBooleanTrue;
+    LOG("AppleVTDecoder: %s hardware accelerated decoding",
+        mIsHardwareAccelerated ? "using" : "not using");
+  } else {
+    LOG("AppleVTDecoder: couldn't determine hardware acceleration status.");
+  }
+  return NS_OK;
+}
+
+CFDictionaryRef
+// Build the format-description extensions dictionary: the avcC extradata
+// (wrapped in a SampleDescriptionExtensionAtoms sub-dictionary) plus
+// chroma-siting hints. Caller owns the returned dictionary.
+AppleVTDecoder::CreateDecoderExtensions()
+{
+  AutoCFRelease<CFDataRef> avc_data =
+    CFDataCreate(kCFAllocatorDefault,
+                 mExtraData->Elements(),
+                 mExtraData->Length());
+
+  const void* atomsKey[] = { CFSTR("avcC") };
+  const void* atomsValue[] = { avc_data };
+  static_assert(ArrayLength(atomsKey) == ArrayLength(atomsValue),
+                "Non matching keys/values array size");
+
+  AutoCFRelease<CFDictionaryRef> atoms =
+    CFDictionaryCreate(kCFAllocatorDefault,
+                       atomsKey,
+                       atomsValue,
+                       ArrayLength(atomsKey),
+                       &kCFTypeDictionaryKeyCallBacks,
+                       &kCFTypeDictionaryValueCallBacks);
+
+  const void* extensionKeys[] =
+    { kCVImageBufferChromaLocationBottomFieldKey,
+      kCVImageBufferChromaLocationTopFieldKey,
+      AppleCMLinker::skPropExtensionAtoms };
+
+  const void* extensionValues[] =
+    { kCVImageBufferChromaLocation_Left,
+      kCVImageBufferChromaLocation_Left,
+      atoms };
+  static_assert(ArrayLength(extensionKeys) == ArrayLength(extensionValues),
+                "Non matching keys/values array size");
+
+  return CFDictionaryCreate(kCFAllocatorDefault,
+                            extensionKeys,
+                            extensionValues,
+                            ArrayLength(extensionKeys),
+                            &kCFTypeDictionaryKeyCallBacks,
+                            &kCFTypeDictionaryValueCallBacks);
+}
+
+CFDictionaryRef
+// Build the decoder-selection spec requesting (or forbidding) hardware
+// acceleration. Returns nullptr when the OS is too old to expose the key
+// (pre-10.9). Caller owns the returned dictionary.
+AppleVTDecoder::CreateDecoderSpecification()
+{
+  if (!AppleVTLinker::skPropEnableHWAccel) {
+    return nullptr;
+  }
+
+  const void* specKeys[] = { AppleVTLinker::skPropEnableHWAccel };
+  const void* specValues[1];
+  if (AppleDecoderModule::sCanUseHardwareVideoDecoder) {
+    specValues[0] = kCFBooleanTrue;
+  } else {
+    // This GPU is blacklisted for hardware decoding.
+    specValues[0] = kCFBooleanFalse;
+  }
+  static_assert(ArrayLength(specKeys) == ArrayLength(specValues),
+                "Non matching keys/values array size");
+
+  return CFDictionaryCreate(kCFAllocatorDefault,
+                            specKeys,
+                            specValues,
+                            ArrayLength(specKeys),
+                            &kCFTypeDictionaryKeyCallBacks,
+                            &kCFTypeDictionaryValueCallBacks);
+}
+
+CFDictionaryRef
+// Build the output pixel-buffer attributes: NV12 for the software-copy
+// path, or 422 + IOSurface/OpenGL-compatible buffers for the zero-copy
+// path. Caller owns the returned dictionary.
+AppleVTDecoder::CreateOutputConfiguration()
+{
+  if (mUseSoftwareImages) {
+    // Output format type:
+    SInt32 PixelFormatTypeValue =
+      kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
+    AutoCFRelease<CFNumberRef> PixelFormatTypeNumber =
+      CFNumberCreate(kCFAllocatorDefault,
+                     kCFNumberSInt32Type,
+                     &PixelFormatTypeValue);
+    const void* outputKeys[] = { kCVPixelBufferPixelFormatTypeKey };
+    const void* outputValues[] = { PixelFormatTypeNumber };
+    static_assert(ArrayLength(outputKeys) == ArrayLength(outputValues),
+                  "Non matching keys/values array size");
+
+    return CFDictionaryCreate(kCFAllocatorDefault,
+                              outputKeys,
+                              outputValues,
+                              ArrayLength(outputKeys),
+                              &kCFTypeDictionaryKeyCallBacks,
+                              &kCFTypeDictionaryValueCallBacks);
+  }
+
+#ifndef MOZ_WIDGET_UIKIT
+  // Output format type:
+  SInt32 PixelFormatTypeValue = kCVPixelFormatType_422YpCbCr8;
+  AutoCFRelease<CFNumberRef> PixelFormatTypeNumber =
+    CFNumberCreate(kCFAllocatorDefault,
+                   kCFNumberSInt32Type,
+                   &PixelFormatTypeValue);
+  // Construct IOSurface Properties
+  const void* IOSurfaceKeys[] = { MacIOSurfaceLib::kPropIsGlobal };
+  const void* IOSurfaceValues[] = { kCFBooleanTrue };
+  static_assert(ArrayLength(IOSurfaceKeys) == ArrayLength(IOSurfaceValues),
+                "Non matching keys/values array size");
+
+  // Construct output configuration.
+  AutoCFRelease<CFDictionaryRef> IOSurfaceProperties =
+    CFDictionaryCreate(kCFAllocatorDefault,
+                       IOSurfaceKeys,
+                       IOSurfaceValues,
+                       ArrayLength(IOSurfaceKeys),
+                       &kCFTypeDictionaryKeyCallBacks,
+                       &kCFTypeDictionaryValueCallBacks);
+
+  const void* outputKeys[] = { kCVPixelBufferIOSurfacePropertiesKey,
+                               kCVPixelBufferPixelFormatTypeKey,
+                               kCVPixelBufferOpenGLCompatibilityKey };
+  const void* outputValues[] = { IOSurfaceProperties,
+                                 PixelFormatTypeNumber,
+                                 kCFBooleanTrue };
+  static_assert(ArrayLength(outputKeys) == ArrayLength(outputValues),
+                "Non matching keys/values array size");
+
+  return CFDictionaryCreate(kCFAllocatorDefault,
+                            outputKeys,
+                            outputValues,
+                            ArrayLength(outputKeys),
+                            &kCFTypeDictionaryKeyCallBacks,
+                            &kCFTypeDictionaryValueCallBacks);
+#else
+  MOZ_ASSERT_UNREACHABLE("No MacIOSurface on iOS");
+#endif
+}
+
+
+} // namespace mozilla
diff --git a/dom/media/platforms/apple/AppleVTDecoder.h b/dom/media/platforms/apple/AppleVTDecoder.h
new file mode 100644
index 000000000..05d08c7c7
--- /dev/null
+++ b/dom/media/platforms/apple/AppleVTDecoder.h
@@ -0,0 +1,126 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_AppleVTDecoder_h
+#define mozilla_AppleVTDecoder_h
+
+#include "PlatformDecoderModule.h"
+#include "mozilla/Atomics.h"
+#include "nsIThread.h"
+#include "ReorderQueue.h"
+#include "TimeUnits.h"
+
+#include "VideoToolbox/VideoToolbox.h"
+
+namespace mozilla {
+
+// H.264 decoder backed by the macOS/iOS VideoToolbox framework.
+// Samples are decoded asynchronously on mTaskQueue; decoded frames arrive
+// in DTS order and are reordered into composition (PTS) order via
+// mReorderQueue before being output.
+class AppleVTDecoder : public MediaDataDecoder {
+public:
+  AppleVTDecoder(const VideoInfo& aConfig,
+                 TaskQueue* aTaskQueue,
+                 MediaDataDecoderCallback* aCallback,
+                 layers::ImageContainer* aImageContainer);
+
+  // Per-frame metadata carried through the VideoToolbox callback as the
+  // opaque sourceFrameRefCon.
+  class AppleFrameRef {
+  public:
+    media::TimeUnit decode_timestamp;
+    media::TimeUnit composition_timestamp;
+    media::TimeUnit duration;
+    int64_t byte_offset;
+    bool is_sync_point;
+
+    explicit AppleFrameRef(const MediaRawData& aSample)
+      : decode_timestamp(media::TimeUnit::FromMicroseconds(aSample.mTimecode))
+      , composition_timestamp(media::TimeUnit::FromMicroseconds(aSample.mTime))
+      , duration(media::TimeUnit::FromMicroseconds(aSample.mDuration))
+      , byte_offset(aSample.mOffset)
+      , is_sync_point(aSample.mKeyframe)
+    {
+    }
+  };
+
+  RefPtr<InitPromise> Init() override;
+  void Input(MediaRawData* aSample) override;
+  void Flush() override;
+  void Drain() override;
+  void Shutdown() override;
+  void SetSeekThreshold(const media::TimeUnit& aTime) override;
+
+  bool IsHardwareAccelerated(nsACString& aFailureReason) const override
+  {
+    return mIsHardwareAccelerated;
+  }
+
+  const char* GetDescriptionName() const override
+  {
+    return mIsHardwareAccelerated
+      ? "apple hardware VT decoder"
+      : "apple software VT decoder";
+  }
+
+  // Access from the taskqueue and the decoder's thread.
+  // OutputFrame is thread-safe.
+  nsresult OutputFrame(CVPixelBufferRef aImage,
+                       AppleFrameRef aFrameRef);
+
+private:
+  virtual ~AppleVTDecoder();
+  // Task-queue counterparts of the public entry points above.
+  void ProcessFlush();
+  void ProcessDrain();
+  void ProcessShutdown();
+  nsresult ProcessDecode(MediaRawData* aSample);
+
+  void AssertOnTaskQueueThread()
+  {
+    MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+  }
+
+  AppleFrameRef* CreateAppleFrameRef(const MediaRawData* aSample);
+  void DrainReorderedFrames();
+  void ClearReorderedFrames();
+  CFDictionaryRef CreateOutputConfiguration();
+
+  const RefPtr<MediaByteBuffer> mExtraData;
+  MediaDataDecoderCallback* mCallback;
+  const uint32_t mPictureWidth;
+  const uint32_t mPictureHeight;
+  const uint32_t mDisplayWidth;
+  const uint32_t mDisplayHeight;
+
+  // Method to set up the decompression session.
+  nsresult InitializeSession();
+  nsresult WaitForAsynchronousFrames();
+  CFDictionaryRef CreateDecoderSpecification();
+  CFDictionaryRef CreateDecoderExtensions();
+  // Method to pass a frame to VideoToolbox for decoding.
+  MediaResult DoDecode(MediaRawData* aSample);
+
+  const RefPtr<TaskQueue> mTaskQueue;
+  // Reorder-queue depth computed from the H.264 extradata.
+  const uint32_t mMaxRefFrames;
+  const RefPtr<layers::ImageContainer> mImageContainer;
+  Atomic<bool> mIsShutDown;
+  const bool mUseSoftwareImages;
+
+  // Set on reader/decode thread calling Flush() to indicate that output is
+  // not required and so input samples on mTaskQueue need not be processed.
+  // Cleared on mTaskQueue in ProcessDrain().
+  Atomic<bool> mIsFlushing;
+  // Protects mReorderQueue.
+  Monitor mMonitor;
+  ReorderQueue mReorderQueue;
+  // Decoded frame will be dropped if its pts is smaller than this
+  // value. It should be initialized before Input() or after Flush(). So it
+  // is safe to access it in OutputFrame without protecting.
+  Maybe<media::TimeUnit> mSeekTargetThreshold;
+
+  CMVideoFormatDescriptionRef mFormat;
+  VTDecompressionSessionRef mSession;
+  Atomic<bool> mIsHardwareAccelerated;
+};
+
+} // namespace mozilla
+
+#endif // mozilla_AppleVTDecoder_h
diff --git a/dom/media/platforms/apple/AppleVTFunctions.h b/dom/media/platforms/apple/AppleVTFunctions.h
new file mode 100644
index 000000000..62765afa4
--- /dev/null
+++ b/dom/media/platforms/apple/AppleVTFunctions.h
@@ -0,0 +1,14 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Construct references to each of the VideoToolbox symbols we use.
+
+LINK_FUNC(VTDecompressionSessionCreate)
+LINK_FUNC(VTDecompressionSessionDecodeFrame)
+LINK_FUNC(VTDecompressionSessionInvalidate)
+LINK_FUNC(VTDecompressionSessionWaitForAsynchronousFrames)
+LINK_FUNC(VTSessionCopyProperty)
+LINK_FUNC(VTSessionCopySupportedPropertyDictionary)
diff --git a/dom/media/platforms/apple/AppleVTLinker.cpp b/dom/media/platforms/apple/AppleVTLinker.cpp
new file mode 100644
index 000000000..51a8a0122
--- /dev/null
+++ b/dom/media/platforms/apple/AppleVTLinker.cpp
@@ -0,0 +1,104 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <dlfcn.h>
+
+#include "AppleVTLinker.h"
+#include "mozilla/ArrayUtils.h"
+#include "nsDebug.h"
+
+#define LOG(...) MOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, (__VA_ARGS__))
+
+namespace mozilla {
+
+// Static state: link status, the dlopen() handle, and the two optional
+// hardware-acceleration property keys (nullptr until Link() resolves them).
+AppleVTLinker::LinkStatus
+AppleVTLinker::sLinkStatus = LinkStatus_INIT;
+
+void* AppleVTLinker::sLink = nullptr;
+CFStringRef AppleVTLinker::skPropEnableHWAccel = nullptr;
+CFStringRef AppleVTLinker::skPropUsingHWAccel = nullptr;
+
+// Define storage for the function pointers declared extern in
+// AppleVTLinker.h. Because the header already redeclared each name inside
+// namespace mozilla as a *pointer* to the framework function, typeof(func)
+// here names that pointer type — so this expands to pointer definitions,
+// not function declarations.
+#define LINK_FUNC(func) typeof(func) func;
+#include "AppleVTFunctions.h"
+#undef LINK_FUNC
+
+// Load the VideoToolbox framework and resolve every symbol listed in
+// AppleVTFunctions.h. Returns true on success. Idempotent: after the first
+// attempt the cached status is returned. NOTE(review): there is no locking
+// here — this assumes the first call is serialized by the caller; confirm.
+/* static */ bool
+AppleVTLinker::Link()
+{
+ if (sLinkStatus) {
+ return sLinkStatus == LinkStatus_SUCCEEDED;
+ }
+
+ // Try the public framework path first, then the private path (where the
+ // framework lived on older OS X releases — TODO confirm which versions).
+ const char* dlnames[] =
+ { "/System/Library/Frameworks/VideoToolbox.framework/VideoToolbox",
+ "/System/Library/PrivateFrameworks/VideoToolbox.framework/VideoToolbox" };
+ bool dlfound = false;
+ for (size_t i = 0; i < ArrayLength(dlnames); i++) {
+ if ((sLink = dlopen(dlnames[i], RTLD_NOW | RTLD_LOCAL))) {
+ dlfound = true;
+ break;
+ }
+ }
+ if (!dlfound) {
+ NS_WARNING("Couldn't load VideoToolbox framework");
+ goto fail;
+ }
+
+// Resolve each listed symbol into the corresponding function pointer
+// defined above; any single missing symbol aborts the whole link.
+#define LINK_FUNC(func) \
+ func = (typeof(func))dlsym(sLink, #func); \
+ if (!func) { \
+ NS_WARNING("Couldn't load VideoToolbox function " #func ); \
+ goto fail; \
+ }
+#include "AppleVTFunctions.h"
+#undef LINK_FUNC
+
+ // Will only resolve in 10.9 and later; GetIOConst() returns nullptr for
+ // missing symbols, so these keys may legitimately stay null and callers
+ // must null-check them.
+ skPropEnableHWAccel =
+ GetIOConst("kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder");
+ skPropUsingHWAccel =
+ GetIOConst("kVTDecompressionPropertyKey_UsingHardwareAcceleratedVideoDecoder");
+
+ LOG("Loaded VideoToolbox framework.");
+ sLinkStatus = LinkStatus_SUCCEEDED;
+ return true;
+
+fail:
+ // Undo any partial linkage (handle, pointers) before recording failure.
+ Unlink();
+
+ sLinkStatus = LinkStatus_FAILED;
+ return false;
+}
+
+// Release the framework: null out every resolved function pointer and the
+// property-key constants, dlclose() the handle, and reset the link status
+// so a later Link() may retry. No-op when the framework was never loaded.
+/* static */ void
+AppleVTLinker::Unlink()
+{
+ if (sLink) {
+ LOG("Unlinking VideoToolbox framework.");
+// Expand the X-macro list into "pointer = nullptr;" for each symbol.
+#define LINK_FUNC(func) \
+ func = nullptr;
+#include "AppleVTFunctions.h"
+#undef LINK_FUNC
+ dlclose(sLink);
+ sLink = nullptr;
+ skPropEnableHWAccel = nullptr;
+ skPropUsingHWAccel = nullptr;
+ sLinkStatus = LinkStatus_INIT;
+ }
+}
+
+// Resolve a CFStringRef constant exported by the loaded framework.
+// dlsym() yields the address of the constant (a CFStringRef*), which we
+// dereference on success; returns nullptr when the symbol is absent
+// (e.g. the hardware-acceleration keys on systems older than 10.9).
+/* static */ CFStringRef
+AppleVTLinker::GetIOConst(const char* symbol)
+{
+ CFStringRef* constant = static_cast<CFStringRef*>(dlsym(sLink, symbol));
+ return constant ? *constant : nullptr;
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/apple/AppleVTLinker.h b/dom/media/platforms/apple/AppleVTLinker.h
new file mode 100644
index 000000000..49783432d
--- /dev/null
+++ b/dom/media/platforms/apple/AppleVTLinker.h
@@ -0,0 +1,46 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef AppleVTLinker_h
+#define AppleVTLinker_h
+
+extern "C" {
+#pragma GCC visibility push(default)
+#include "VideoToolbox/VideoToolbox.h"
+#pragma GCC visibility pop
+}
+
+#include "nscore.h"
+
+namespace mozilla {
+
+// Dynamically links the VideoToolbox framework at runtime via dlopen(),
+// so the binary can still load on OS X versions where the framework is
+// unavailable. All members are static; there is nothing to instantiate.
+class AppleVTLinker
+{
+public:
+ // Load the framework and resolve all symbols; true on success. Cached.
+ static bool Link();
+ // Release the framework and reset all resolved pointers.
+ static void Unlink();
+ // Hardware-acceleration property keys; resolved only on 10.9+, so these
+ // may be nullptr and callers must check before use.
+ static CFStringRef skPropEnableHWAccel;
+ static CFStringRef skPropUsingHWAccel;
+
+private:
+ // Handle returned by dlopen(), or nullptr when not linked.
+ static void* sLink;
+
+ static enum LinkStatus {
+ LinkStatus_INIT = 0,
+ LinkStatus_FAILED,
+ LinkStatus_SUCCEEDED
+ } sLinkStatus;
+
+ // Look up an exported CFStringRef constant by name; nullptr if absent.
+ static CFStringRef GetIOConst(const char* symbol);
+};
+
+// Declare, inside namespace mozilla, an extern function pointer for each
+// VideoToolbox entry point. These shadow the global framework prototypes
+// from VideoToolbox.h, so callers in this namespace transparently go
+// through the dlsym()-resolved pointers instead of direct linkage.
+#define LINK_FUNC(func) extern typeof(func)* func;
+#include "AppleVTFunctions.h"
+#undef LINK_FUNC
+
+} // namespace mozilla
+
+#endif // AppleVTLinker_h
diff --git a/dom/media/platforms/apple/VideoToolbox/VideoToolbox.h b/dom/media/platforms/apple/VideoToolbox/VideoToolbox.h
new file mode 100644
index 000000000..edd3f9c1b
--- /dev/null
+++ b/dom/media/platforms/apple/VideoToolbox/VideoToolbox.h
@@ -0,0 +1,94 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Stub header for VideoToolbox framework API.
+// We include our own copy so we can build on MacOS versions
+// where it's not available.
+
+#ifndef mozilla_VideoToolbox_VideoToolbox_h
+#define mozilla_VideoToolbox_VideoToolbox_h
+
+// CoreMedia is available starting in OS X 10.7,
+// so we need to dlopen it as well to run on 10.6,
+// but we can depend on the real framework headers at build time.
+
+#include <CoreFoundation/CoreFoundation.h>
+#include <CoreMedia/CoreMedia.h>
+#include <CoreVideo/CVPixelBuffer.h>
+
+// Flag words passed to / returned from VTDecompressionSessionDecodeFrame.
+typedef uint32_t VTDecodeFrameFlags;
+typedef uint32_t VTDecodeInfoFlags;
+// Output info flags reported to the decompression callback.
+enum {
+ kVTDecodeInfo_Asynchronous = 1UL << 0,
+ kVTDecodeInfo_FrameDropped = 1UL << 1,
+};
+// Input flags controlling how a frame is decoded.
+enum {
+ kVTDecodeFrame_EnableAsynchronousDecompression = 1<<0,
+ kVTDecodeFrame_DoNotOutputFrame = 1<<1,
+ kVTDecodeFrame_1xRealTimePlayback = 1<<2,
+ kVTDecodeFrame_EnableTemporalProcessing = 1<<3,
+};
+
+// Generic VideoToolbox session handle (used by the VTSession* property API).
+typedef CFTypeRef VTSessionRef;
+// Opaque decompression session handle.
+typedef struct OpaqueVTDecompressionSession* VTDecompressionSessionRef;
+// Callback invoked for each decoded frame. Per the VideoToolbox API the
+// parameters are: output refcon, per-frame source refcon, status, info
+// flags, decoded image buffer, presentation timestamp, and presentation
+// duration — TODO confirm against the real framework header.
+typedef void (*VTDecompressionOutputCallback)(
+ void*,
+ void*,
+ OSStatus,
+ VTDecodeInfoFlags,
+ CVImageBufferRef,
+ CMTime,
+ CMTime
+);
+// Bundles the callback with its refcon for VTDecompressionSessionCreate.
+typedef struct VTDecompressionOutputCallbackRecord {
+ VTDecompressionOutputCallback decompressionOutputCallback;
+ void* decompressionOutputRefCon;
+} VTDecompressionOutputCallbackRecord;
+
+// Create a decompression session. Per the VideoToolbox API the parameters
+// are: allocator, video format description, decoder specification dict,
+// destination image-buffer attributes dict, output callback record, and
+// the created session (out).
+OSStatus
+VTDecompressionSessionCreate(
+ CFAllocatorRef,
+ CMVideoFormatDescriptionRef,
+ CFDictionaryRef,
+ CFDictionaryRef,
+ const VTDecompressionOutputCallbackRecord*,
+ VTDecompressionSessionRef*
+);
+
+// Submit one compressed sample for decode: session, sample buffer, decode
+// flags, per-frame refcon, and returned info flags (out, may be null).
+OSStatus
+VTDecompressionSessionDecodeFrame(
+ VTDecompressionSessionRef,
+ CMSampleBufferRef,
+ VTDecodeFrameFlags,
+ void*,
+ VTDecodeInfoFlags*
+);
+
+// Block until all pending asynchronous frames have been emitted.
+OSStatus
+VTDecompressionSessionWaitForAsynchronousFrames(
+ VTDecompressionSessionRef
+);
+
+// Tear down the session; must be called before releasing the last ref.
+void
+VTDecompressionSessionInvalidate(
+ VTDecompressionSessionRef
+);
+
+// Copy a session property value: session, property key, allocator,
+// and destination for the copied value (out).
+OSStatus
+VTSessionCopyProperty(
+ VTSessionRef,
+ CFStringRef,
+ CFAllocatorRef,
+ void*
+);
+
+// Copy the dictionary describing the properties the session supports (out).
+OSStatus
+VTSessionCopySupportedPropertyDictionary(
+ VTSessionRef,
+ CFDictionaryRef*
+);
+
+#endif // mozilla_VideoToolbox_VideoToolbox_h