Diffstat (limited to 'dom/media')
-rw-r--r--  dom/media/AudioConverter.cpp | 108
-rw-r--r--  dom/media/AudioStream.cpp | 6
-rw-r--r--  dom/media/AudioStream.h | 4
-rw-r--r--  dom/media/Benchmark.cpp | 2
-rw-r--r--  dom/media/CubebUtils.cpp | 33
-rw-r--r--  dom/media/CubebUtils.h | 2
-rw-r--r--  dom/media/DecoderTraits.cpp | 115
-rw-r--r--  dom/media/GraphDriver.cpp | 21
-rw-r--r--  dom/media/GraphDriver.h | 6
-rw-r--r--  dom/media/MP3FrameParser.cpp | 591
-rw-r--r--  dom/media/MP3FrameParser.h | 219
-rw-r--r--  dom/media/MediaData.h | 3
-rw-r--r--  dom/media/MediaDecoder.cpp | 52
-rw-r--r--  dom/media/MediaDecoder.h | 8
-rw-r--r--  dom/media/MediaDecoderStateMachine.cpp | 53
-rw-r--r--  dom/media/MediaFormatReader.cpp | 4
-rw-r--r--  dom/media/MediaManager.cpp | 54
-rw-r--r--  dom/media/MediaPrefs.h | 6
-rw-r--r--  dom/media/MediaResource.cpp | 2
-rw-r--r--  dom/media/MediaStreamGraph.cpp | 3
-rw-r--r--  dom/media/ThreadPoolCOMListener.h | 4
-rw-r--r--  dom/media/VideoFrameContainer.cpp | 12
-rw-r--r--  dom/media/VideoUtils.cpp | 56
-rw-r--r--  dom/media/VideoUtils.h | 5
-rw-r--r--  dom/media/WebVTTListener.h | 1
-rw-r--r--  dom/media/android/AndroidMediaDecoder.cpp | 25
-rw-r--r--  dom/media/android/AndroidMediaDecoder.h | 28
-rw-r--r--  dom/media/android/AndroidMediaPluginHost.cpp | 305
-rw-r--r--  dom/media/android/AndroidMediaPluginHost.h | 41
-rw-r--r--  dom/media/android/AndroidMediaReader.cpp | 449
-rw-r--r--  dom/media/android/AndroidMediaReader.h | 75
-rw-r--r--  dom/media/android/AndroidMediaResourceServer.cpp | 503
-rw-r--r--  dom/media/android/AndroidMediaResourceServer.h | 96
-rw-r--r--  dom/media/android/MPAPI.h | 165
-rw-r--r--  dom/media/android/moz.build | 27
-rw-r--r--  dom/media/directshow/AudioSinkFilter.cpp | 285
-rw-r--r--  dom/media/directshow/AudioSinkFilter.h | 95
-rw-r--r--  dom/media/directshow/AudioSinkInputPin.cpp | 195
-rw-r--r--  dom/media/directshow/AudioSinkInputPin.h | 76
-rw-r--r--  dom/media/directshow/DirectShowDecoder.cpp | 65
-rw-r--r--  dom/media/directshow/DirectShowDecoder.h | 45
-rw-r--r--  dom/media/directshow/DirectShowReader.cpp | 360
-rw-r--r--  dom/media/directshow/DirectShowReader.h | 110
-rw-r--r--  dom/media/directshow/DirectShowUtils.cpp | 369
-rw-r--r--  dom/media/directshow/DirectShowUtils.h | 125
-rw-r--r--  dom/media/directshow/SampleSink.cpp | 159
-rw-r--r--  dom/media/directshow/SampleSink.h | 67
-rw-r--r--  dom/media/directshow/SourceFilter.cpp | 683
-rw-r--r--  dom/media/directshow/SourceFilter.h | 75
-rw-r--r--  dom/media/directshow/moz.build | 41
-rw-r--r--  dom/media/eme/DetailedPromise.cpp | 3
-rw-r--r--  dom/media/eme/MediaKeySession.cpp | 3
-rw-r--r--  dom/media/eme/MediaKeys.cpp | 2
-rw-r--r--  dom/media/fmp4/MP4Decoder.cpp | 15
-rw-r--r--  dom/media/fmp4/MP4Demuxer.cpp | 64
-rw-r--r--  dom/media/fmp4/MP4Stream.cpp | 3
-rw-r--r--  dom/media/fmp4/moz.build | 3
-rw-r--r--  dom/media/gmp/GMPParent.cpp | 3
-rw-r--r--  dom/media/gmp/widevine-adapter/WidevineAdapter.cpp | 10
-rw-r--r--  dom/media/gmp/widevine-adapter/WidevineDecryptor.cpp | 101
-rw-r--r--  dom/media/gmp/widevine-adapter/WidevineDecryptor.h | 24
-rw-r--r--  dom/media/gmp/widevine-adapter/WidevineUtils.cpp | 4
-rw-r--r--  dom/media/gmp/widevine-adapter/WidevineUtils.h | 10
-rw-r--r--  dom/media/gmp/widevine-adapter/WidevineVideoDecoder.h | 2
-rw-r--r--  dom/media/gmp/widevine-adapter/content_decryption_module.h | 289
-rw-r--r--  dom/media/gmp/widevine-adapter/content_decryption_module_export.h | 22
-rw-r--r--  dom/media/gmp/widevine-adapter/content_decryption_module_ext.h | 64
-rw-r--r--  dom/media/gtest/Cargo.toml | 7
-rw-r--r--  dom/media/gtest/TestMP3Demuxer.cpp | 1
-rw-r--r--  dom/media/gtest/TestMP4Reader.cpp | 217
-rw-r--r--  dom/media/gtest/TestRust.cpp | 9
-rw-r--r--  dom/media/gtest/hello.rs | 6
-rw-r--r--  dom/media/gtest/moz.build | 1
-rw-r--r--  dom/media/mediasource/ContainerParser.cpp | 3
-rw-r--r--  dom/media/mediasource/MediaSource.cpp | 8
-rw-r--r--  dom/media/mediasource/TrackBuffersManager.cpp | 2
-rw-r--r--  dom/media/mediasource/moz.build | 3
-rw-r--r--  dom/media/moz.build | 18
-rw-r--r--  dom/media/mp3/MP3Decoder.cpp (renamed from dom/media/MP3Decoder.cpp) | 2
-rw-r--r--  dom/media/mp3/MP3Decoder.h (renamed from dom/media/MP3Decoder.h) | 0
-rw-r--r--  dom/media/mp3/MP3Demuxer.cpp (renamed from dom/media/MP3Demuxer.cpp) | 2
-rw-r--r--  dom/media/mp3/MP3Demuxer.h (renamed from dom/media/MP3Demuxer.h) | 2
-rw-r--r--  dom/media/mp3/moz.build | 17
-rw-r--r--  dom/media/ogg/OggDemuxer.cpp | 2
-rw-r--r--  dom/media/platforms/MediaTelemetryConstants.h | 22
-rw-r--r--  dom/media/platforms/PDMFactory.cpp | 9
-rw-r--r--  dom/media/platforms/agnostic/AOMDecoder.cpp | 332
-rw-r--r--  dom/media/platforms/agnostic/AOMDecoder.h | 62
-rw-r--r--  dom/media/platforms/agnostic/AgnosticDecoderModule.cpp | 19
-rw-r--r--  dom/media/platforms/agnostic/VPXDecoder.cpp | 53
-rw-r--r--  dom/media/platforms/agnostic/VPXDecoder.h | 12
-rw-r--r--  dom/media/platforms/ffmpeg/FFmpegDataDecoder.cpp | 18
-rw-r--r--  dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp | 3
-rw-r--r--  dom/media/platforms/ffmpeg/FFmpegLibWrapper.h | 7
-rw-r--r--  dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp | 75
-rw-r--r--  dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h | 1
-rw-r--r--  dom/media/platforms/ffmpeg/ffvpx/moz.build | 2
-rw-r--r--  dom/media/platforms/moz.build | 9
-rw-r--r--  dom/media/platforms/omx/OmxPlatformLayer.cpp | 23
-rw-r--r--  dom/media/platforms/wmf/DXVA2Manager.cpp | 8
-rw-r--r--  dom/media/platforms/wmf/WMFAudioMFTManager.cpp | 1
-rw-r--r--  dom/media/platforms/wmf/WMFDecoderModule.h | 6
-rw-r--r--  dom/media/platforms/wmf/WMFMediaDataDecoder.cpp | 46
-rw-r--r--  dom/media/platforms/wmf/WMFMediaDataDecoder.h | 6
-rw-r--r--  dom/media/platforms/wmf/WMFVideoMFTManager.cpp | 4
-rw-r--r--  dom/media/systemservices/LoadManager.cpp | 18
-rw-r--r--  dom/media/test/bug1377278.webm (renamed from dom/media/test/bug580982.webm) | bin 215594 -> 215594 bytes
-rw-r--r--  dom/media/test/bug1377278.webm^headers^ (renamed from dom/media/test/bug580982.webm^headers^) | 0
-rw-r--r--  dom/media/test/crashtests/1228484.html | 13
-rw-r--r--  dom/media/test/crashtests/crashtests.list | 2
-rw-r--r--  dom/media/test/manifest.js | 8
-rw-r--r--  dom/media/test/mochitest.ini | 4
-rw-r--r--  dom/media/test/test_can_play_type_mpeg.html | 3
-rw-r--r--  dom/media/webaudio/AudioBuffer.cpp | 1
-rwxr-xr-x  dom/media/webaudio/AudioContext.cpp | 12
-rw-r--r--  dom/media/webaudio/AudioContext.h | 6
-rw-r--r--  dom/media/webaudio/AudioParam.cpp | 1
-rw-r--r--  dom/media/webaudio/MediaBufferDecoder.cpp | 3
-rw-r--r--  dom/media/webaudio/WaveShaperNode.cpp | 1
-rw-r--r--  dom/media/webm/WebMDecoder.cpp | 25
-rw-r--r--  dom/media/webm/WebMDemuxer.cpp | 190
-rw-r--r--  dom/media/webm/WebMDemuxer.h | 18
-rw-r--r--  dom/media/webrtc/RTCCertificate.cpp | 2
123 files changed, 1283 insertions, 6498 deletions
diff --git a/dom/media/AudioConverter.cpp b/dom/media/AudioConverter.cpp
index 25b981f43..002e79108 100644
--- a/dom/media/AudioConverter.cpp
+++ b/dom/media/AudioConverter.cpp
@@ -5,8 +5,8 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "AudioConverter.h"
-#include <string.h>
#include <speex/speex_resampler.h>
+#include <string.h>
#include <cmath>
/*
@@ -140,24 +140,28 @@ static inline int16_t clipTo15(int32_t aX)
size_t
AudioConverter::DownmixAudio(void* aOut, const void* aIn, size_t aFrames) const
{
- MOZ_ASSERT(mIn.Format() == AudioConfig::FORMAT_S16 ||
- mIn.Format() == AudioConfig::FORMAT_FLT);
- MOZ_ASSERT(mIn.Channels() >= mOut.Channels());
- MOZ_ASSERT(mIn.Layout() == AudioConfig::ChannelLayout(mIn.Channels()),
- "Can only downmix input data in SMPTE layout");
- MOZ_ASSERT(mOut.Layout() == AudioConfig::ChannelLayout(2) ||
- mOut.Layout() == AudioConfig::ChannelLayout(1));
+ MOZ_DIAGNOSTIC_ASSERT(mIn.Format() == AudioConfig::FORMAT_S16 ||
+ mIn.Format() == AudioConfig::FORMAT_FLT);
+ MOZ_DIAGNOSTIC_ASSERT(mIn.Channels() >= mOut.Channels());
+ MOZ_DIAGNOSTIC_ASSERT(
+ mIn.Layout() == AudioConfig::ChannelLayout(mIn.Channels()),
+ "Can only downmix input data in SMPTE layout");
+ MOZ_DIAGNOSTIC_ASSERT(mOut.Layout() == AudioConfig::ChannelLayout(2) ||
+ mOut.Layout() == AudioConfig::ChannelLayout(1),
+ "Can only downmix to stereo or mono");
- uint32_t channels = mIn.Channels();
+ uint32_t inChannels = mIn.Channels();
+ uint32_t outChannels = mOut.Channels();
- if (channels == 1 && mOut.Channels() == 1) {
+ if (inChannels == outChannels) {
+ // Number of channels is equal; no processing needed, just move data.
if (aOut != aIn) {
memmove(aOut, aIn, FramesOutToBytes(aFrames));
}
return aFrames;
}
- if (channels > 2) {
+ if (inChannels > 2) {
if (mIn.Format() == AudioConfig::FORMAT_FLT) {
// Downmix matrix. Per-row normalization 1 for rows 3,4 and 2 for rows 5-8.
static const float dmatrix[6][8][2]= {
@@ -174,12 +178,18 @@ AudioConverter::DownmixAudio(void* aOut, const void* aIn, size_t aFrames) const
for (uint32_t i = 0; i < aFrames; i++) {
float sampL = 0.0;
float sampR = 0.0;
- for (uint32_t j = 0; j < channels; j++) {
- sampL += in[i*mIn.Channels()+j]*dmatrix[mIn.Channels()-3][j][0];
- sampR += in[i*mIn.Channels()+j]*dmatrix[mIn.Channels()-3][j][1];
+ for (uint32_t j = 0; j < inChannels; j++) {
+ sampL += in[i * inChannels + j] * dmatrix[inChannels - 3][j][0];
+ sampR += in[i * inChannels + j] * dmatrix[inChannels - 3][j][1];
+ }
+ if (outChannels == 2) {
+ // Stereo
+ *out++ = sampL;
+ *out++ = sampR;
+ } else {
+ // Mono
+ *out++ = (sampL + sampR) * 0.5;
}
- *out++ = sampL;
- *out++ = sampR;
}
} else if (mIn.Format() == AudioConfig::FORMAT_S16) {
// Downmix matrix. Per-row normalization 1 for rows 3,4 and 2 for rows 5-8.
@@ -198,45 +208,51 @@ AudioConverter::DownmixAudio(void* aOut, const void* aIn, size_t aFrames) const
for (uint32_t i = 0; i < aFrames; i++) {
int32_t sampL = 0;
int32_t sampR = 0;
- for (uint32_t j = 0; j < channels; j++) {
- sampL+=in[i*channels+j]*dmatrix[channels-3][j][0];
- sampR+=in[i*channels+j]*dmatrix[channels-3][j][1];
+ for (uint32_t j = 0; j < inChannels; j++) {
+ sampL += in[i * inChannels + j] * dmatrix[inChannels - 3][j][0];
+ sampR += in[i * inChannels + j] * dmatrix[inChannels - 3][j][1];
+ }
+ sampL = clipTo15((sampL + 8192) >> 14);
+ sampR = clipTo15((sampR + 8192) >> 14);
+ if (outChannels == 2) {
+ // Stereo
+ *out++ = sampL;
+ *out++ = sampR;
+ } else {
+ // Mono
+ *out++ = (sampL + sampR) * 0.5;
}
- *out++ = clipTo15((sampL + 8192)>>14);
- *out++ = clipTo15((sampR + 8192)>>14);
}
} else {
MOZ_DIAGNOSTIC_ASSERT(false, "Unsupported data type");
}
- // If we are to continue downmixing to mono, start working on the output
- // buffer.
- aIn = aOut;
- channels = 2;
+ return aFrames;
}
- if (mOut.Channels() == 1) {
- if (mIn.Format() == AudioConfig::FORMAT_FLT) {
- const float* in = static_cast<const float*>(aIn);
- float* out = static_cast<float*>(aOut);
- for (size_t fIdx = 0; fIdx < aFrames; ++fIdx) {
- float sample = 0.0;
- // The sample of the buffer would be interleaved.
- sample = (in[fIdx*channels] + in[fIdx*channels + 1]) * 0.5;
- *out++ = sample;
- }
- } else if (mIn.Format() == AudioConfig::FORMAT_S16) {
- const int16_t* in = static_cast<const int16_t*>(aIn);
- int16_t* out = static_cast<int16_t*>(aOut);
- for (size_t fIdx = 0; fIdx < aFrames; ++fIdx) {
- int32_t sample = 0.0;
- // The sample of the buffer would be interleaved.
- sample = (in[fIdx*channels] + in[fIdx*channels + 1]) * 0.5;
- *out++ = sample;
- }
- } else {
- MOZ_DIAGNOSTIC_ASSERT(false, "Unsupported data type");
+ // If we get here, we're doing a stereo -> mono conversion.
+ MOZ_DIAGNOSTIC_ASSERT(inChannels == 2 && outChannels == 1);
+
+ if (mIn.Format() == AudioConfig::FORMAT_FLT) {
+ const float* in = static_cast<const float*>(aIn);
+ float* out = static_cast<float*>(aOut);
+ for (size_t fIdx = 0; fIdx < aFrames; ++fIdx) {
+ float sample = 0.0;
+ // The sample of the buffer would be interleaved.
+ sample = (in[fIdx * inChannels] + in[fIdx * inChannels + 1]) * 0.5;
+ *out++ = sample;
}
+ } else if (mIn.Format() == AudioConfig::FORMAT_S16) {
+ const int16_t* in = static_cast<const int16_t*>(aIn);
+ int16_t* out = static_cast<int16_t*>(aOut);
+ for (size_t fIdx = 0; fIdx < aFrames; ++fIdx) {
+ int32_t sample = 0.0;
+ // The sample of the buffer would be interleaved.
+ sample = (in[fIdx * inChannels] + in[fIdx * inChannels + 1]) * 0.5;
+ *out++ = sample;
+ }
+ } else {
+ MOZ_DIAGNOSTIC_ASSERT(false, "Unsupported data type");
}
return aFrames;
}
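For context, the stereo-to-mono step that this patch folds directly into DownmixAudio() boils down to averaging the interleaved left and right samples with a 0.5 scale. The sketch below is a minimal standalone illustration of that averaging in plain C++, not Mozilla's AudioConverter API; the function and buffer names are invented for the example.

```cpp
#include <cstddef>
#include <cstdio>
#include <vector>

// Average interleaved stereo float samples down to mono:
// out[i] = (L[i] + R[i]) * 0.5, matching the 0.5 scale used in the patch.
static std::vector<float> DownmixStereoToMono(const std::vector<float>& interleaved)
{
  std::vector<float> mono(interleaved.size() / 2);
  for (size_t frame = 0; frame < mono.size(); ++frame) {
    mono[frame] = (interleaved[2 * frame] + interleaved[2 * frame + 1]) * 0.5f;
  }
  return mono;
}

int main()
{
  // Two frames of interleaved stereo: (L, R) = (1.0, 0.0) and (0.5, 0.5).
  std::vector<float> stereo{1.0f, 0.0f, 0.5f, 0.5f};
  for (float sample : DownmixStereoToMono(stereo)) {
    std::printf("%f\n", sample); // prints 0.5 twice
  }
  return 0;
}
```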
diff --git a/dom/media/AudioStream.cpp b/dom/media/AudioStream.cpp
index 54cf7b965..4b1d82c37 100644
--- a/dom/media/AudioStream.cpp
+++ b/dom/media/AudioStream.cpp
@@ -14,7 +14,6 @@
#include "mozilla/Mutex.h"
#include "mozilla/Sprintf.h"
#include <algorithm>
-#include "mozilla/Telemetry.h"
#include "CubebUtils.h"
#include "nsPrintfCString.h"
#include "gfxPrefs.h"
@@ -346,7 +345,6 @@ AudioStream::Init(uint32_t aNumChannels, uint32_t aRate,
cubeb* cubebContext = CubebUtils::GetCubebContext();
if (!cubebContext) {
NS_WARNING("Can't get cubeb context!");
- CubebUtils::ReportCubebStreamInitFailure(true);
return NS_ERROR_DOM_MEDIA_CUBEB_INITIALIZATION_ERR;
}
@@ -368,18 +366,14 @@ AudioStream::OpenCubeb(cubeb* aContext, cubeb_stream_params& aParams,
latency_frames,
DataCallback_S, StateCallback_S, this) == CUBEB_OK) {
mCubebStream.reset(stream);
- CubebUtils::ReportCubebBackendUsed();
} else {
NS_WARNING(nsPrintfCString("AudioStream::OpenCubeb() %p failed to init cubeb", this).get());
- CubebUtils::ReportCubebStreamInitFailure(aIsFirst);
return NS_ERROR_FAILURE;
}
TimeDuration timeDelta = TimeStamp::Now() - aStartTime;
LOG("creation time %sfirst: %u ms", aIsFirst ? "" : "not ",
(uint32_t) timeDelta.ToMilliseconds());
- Telemetry::Accumulate(aIsFirst ? Telemetry::AUDIOSTREAM_FIRST_OPEN_MS :
- Telemetry::AUDIOSTREAM_LATER_OPEN_MS, timeDelta.ToMilliseconds());
return NS_OK;
}
diff --git a/dom/media/AudioStream.h b/dom/media/AudioStream.h
index acc38b93d..199314d4b 100644
--- a/dom/media/AudioStream.h
+++ b/dom/media/AudioStream.h
@@ -17,6 +17,10 @@
#include "mozilla/UniquePtr.h"
#include "CubebUtils.h"
#include "soundtouch/SoundTouchFactory.h"
+#ifdef XP_SOLARIS
+#include "soundtouch/SoundTouch.h"
+#endif
+
namespace mozilla {
diff --git a/dom/media/Benchmark.cpp b/dom/media/Benchmark.cpp
index fdbedeca5..7394f8036 100644
--- a/dom/media/Benchmark.cpp
+++ b/dom/media/Benchmark.cpp
@@ -11,7 +11,6 @@
#include "PDMFactory.h"
#include "WebMDemuxer.h"
#include "mozilla/Preferences.h"
-#include "mozilla/Telemetry.h"
#include "mozilla/dom/ContentChild.h"
#ifndef MOZ_WIDGET_ANDROID
@@ -68,7 +67,6 @@ VP9Benchmark::IsVP9DecodeFast()
Preferences::SetUint(sBenchmarkFpsPref, aDecodeFps);
Preferences::SetUint(sBenchmarkFpsVersionCheck, sBenchmarkVersionID);
}
- Telemetry::Accumulate(Telemetry::ID::VIDEO_VP9_BENCHMARK_FPS, aDecodeFps);
},
[]() { });
}
diff --git a/dom/media/CubebUtils.cpp b/dom/media/CubebUtils.cpp
index fe94264ee..0f0167d9c 100644
--- a/dom/media/CubebUtils.cpp
+++ b/dom/media/CubebUtils.cpp
@@ -239,39 +239,6 @@ cubeb* GetCubebContextUnlocked()
return sCubebContext;
}
-void ReportCubebBackendUsed()
-{
- StaticMutexAutoLock lock(sMutex);
-
- sAudioStreamInitEverSucceeded = true;
-
- bool foundBackend = false;
- for (uint32_t i = 0; i < ArrayLength(AUDIOSTREAM_BACKEND_ID_STR); i++) {
- if (!strcmp(cubeb_get_backend_id(sCubebContext), AUDIOSTREAM_BACKEND_ID_STR[i])) {
- Telemetry::Accumulate(Telemetry::AUDIOSTREAM_BACKEND_USED, i);
- foundBackend = true;
- }
- }
- if (!foundBackend) {
- Telemetry::Accumulate(Telemetry::AUDIOSTREAM_BACKEND_USED,
- CUBEB_BACKEND_UNKNOWN);
- }
-}
-
-void ReportCubebStreamInitFailure(bool aIsFirst)
-{
- StaticMutexAutoLock lock(sMutex);
- if (!aIsFirst && !sAudioStreamInitEverSucceeded) {
- // This machine has no audio hardware, or it's in really bad shape, don't
- // send this info, since we want CUBEB_BACKEND_INIT_FAILURE_OTHER to detect
- // failures to open multiple streams in a process over time.
- return;
- }
- Telemetry::Accumulate(Telemetry::AUDIOSTREAM_BACKEND_USED,
- aIsFirst ? CUBEB_BACKEND_INIT_FAILURE_FIRST
- : CUBEB_BACKEND_INIT_FAILURE_OTHER);
-}
-
uint32_t GetCubebPlaybackLatencyInMilliseconds()
{
StaticMutexAutoLock lock(sMutex);
diff --git a/dom/media/CubebUtils.h b/dom/media/CubebUtils.h
index fa5fc2294..f43492374 100644
--- a/dom/media/CubebUtils.h
+++ b/dom/media/CubebUtils.h
@@ -35,8 +35,6 @@ double GetVolumeScale();
bool GetFirstStream();
cubeb* GetCubebContext();
cubeb* GetCubebContextUnlocked();
-void ReportCubebStreamInitFailure(bool aIsFirstStream);
-void ReportCubebBackendUsed();
uint32_t GetCubebPlaybackLatencyInMilliseconds();
Maybe<uint32_t> GetCubebMSGLatencyInFrames();
bool CubebLatencyPrefSet();
diff --git a/dom/media/DecoderTraits.cpp b/dom/media/DecoderTraits.cpp
index ddd35fe0d..6aa44f3e5 100644
--- a/dom/media/DecoderTraits.cpp
+++ b/dom/media/DecoderTraits.cpp
@@ -10,7 +10,6 @@
#include "nsCharSeparatedTokenizer.h"
#include "nsMimeTypes.h"
#include "mozilla/Preferences.h"
-#include "mozilla/Telemetry.h"
#include "OggDecoder.h"
#include "OggDemuxer.h"
@@ -18,15 +17,6 @@
#include "WebMDecoder.h"
#include "WebMDemuxer.h"
-#ifdef MOZ_ANDROID_OMX
-#include "AndroidMediaDecoder.h"
-#include "AndroidMediaReader.h"
-#include "AndroidMediaPluginHost.h"
-#endif
-#ifdef MOZ_DIRECTSHOW
-#include "DirectShowDecoder.h"
-#include "DirectShowReader.h"
-#endif
#ifdef MOZ_FMP4
#include "MP4Decoder.h"
#include "MP4Demuxer.h"
@@ -94,45 +84,6 @@ DecoderTraits::IsWebMAudioType(const nsACString& aType)
return aType.EqualsASCII("audio/webm");
}
-static char const *const gHttpLiveStreamingTypes[] = {
- // For m3u8.
- // https://tools.ietf.org/html/draft-pantos-http-live-streaming-19#section-10
- "application/vnd.apple.mpegurl",
- // Some sites serve these as the informal m3u type.
- "application/x-mpegurl",
- "audio/x-mpegurl",
- nullptr
-};
-
-static bool
-IsHttpLiveStreamingType(const nsACString& aType)
-{
- return CodecListContains(gHttpLiveStreamingTypes, aType);
-}
-
-#ifdef MOZ_ANDROID_OMX
-static bool
-IsAndroidMediaType(const nsACString& aType)
-{
- if (!MediaDecoder::IsAndroidMediaPluginEnabled()) {
- return false;
- }
-
- static const char* supportedTypes[] = {
- "audio/mpeg", "audio/mp4", "video/mp4", "video/x-m4v", nullptr
- };
- return CodecListContains(supportedTypes, aType);
-}
-#endif
-
-#ifdef MOZ_DIRECTSHOW
-static bool
-IsDirectShowSupportedType(const nsACString& aType)
-{
- return DirectShowDecoder::GetSupportedCodecs(aType, nullptr);
-}
-#endif
-
#ifdef MOZ_FMP4
static bool
IsMP4SupportedType(const MediaContentType& aParsedType,
@@ -247,14 +198,6 @@ CanHandleCodecsType(const MediaContentType& aType,
if (IsFlacSupportedType(aType.GetMIMEType(), aType.GetCodecs())) {
return CANPLAY_YES;
}
-#ifdef MOZ_DIRECTSHOW
- DirectShowDecoder::GetSupportedCodecs(aType.GetMIMEType(), &codecList);
-#endif
-#ifdef MOZ_ANDROID_OMX
- if (MediaDecoder::IsAndroidMediaPluginEnabled()) {
- EnsureAndroidMediaPluginHost()->FindDecoder(aType.GetMIMEType(), &codecList);
- }
-#endif
if (!codecList) {
return CANPLAY_MAYBE;
}
@@ -287,10 +230,6 @@ CanHandleMediaType(const MediaContentType& aType,
{
MOZ_ASSERT(NS_IsMainThread());
- if (IsHttpLiveStreamingType(aType.GetMIMEType())) {
- Telemetry::Accumulate(Telemetry::MEDIA_HLS_CANPLAY_REQUESTED, true);
- }
-
if (aType.HaveCodecs()) {
CanPlayStatus result = CanHandleCodecsType(aType, aDiagnostics);
if (result == CANPLAY_NO || result == CANPLAY_YES) {
@@ -320,17 +259,6 @@ CanHandleMediaType(const MediaContentType& aType,
if (IsFlacSupportedType(aType.GetMIMEType())) {
return CANPLAY_MAYBE;
}
-#ifdef MOZ_DIRECTSHOW
- if (DirectShowDecoder::GetSupportedCodecs(aType.GetMIMEType(), nullptr)) {
- return CANPLAY_MAYBE;
- }
-#endif
-#ifdef MOZ_ANDROID_OMX
- if (MediaDecoder::IsAndroidMediaPluginEnabled() &&
- EnsureAndroidMediaPluginHost()->FindDecoder(aType.GetMIMEType(), nullptr)) {
- return CANPLAY_MAYBE;
- }
-#endif
return CANPLAY_NO;
}
@@ -411,33 +339,12 @@ InstantiateDecoder(const nsACString& aType,
decoder = new FlacDecoder(aOwner);
return decoder.forget();
}
-#ifdef MOZ_ANDROID_OMX
- if (MediaDecoder::IsAndroidMediaPluginEnabled() &&
- EnsureAndroidMediaPluginHost()->FindDecoder(aType, nullptr)) {
- decoder = new AndroidMediaDecoder(aOwner, aType);
- return decoder.forget();
- }
-#endif
if (IsWebMSupportedType(aType)) {
decoder = new WebMDecoder(aOwner);
return decoder.forget();
}
-#ifdef MOZ_DIRECTSHOW
- // Note: DirectShow should come before WMF, so that we prefer DirectShow's
- // MP3 support over WMF's.
- if (IsDirectShowSupportedType(aType)) {
- decoder = new DirectShowDecoder(aOwner);
- return decoder.forget();
- }
-#endif
-
- if (IsHttpLiveStreamingType(aType)) {
- // We don't have an HLS decoder.
- Telemetry::Accumulate(Telemetry::MEDIA_HLS_DECODER_SUCCESS, false);
- }
-
return nullptr;
}
@@ -466,7 +373,7 @@ MediaDecoderReader* DecoderTraits::CreateReader(const nsACString& aType, Abstrac
} else
#endif
if (IsMP3SupportedType(aType)) {
- decoderReader = new MediaFormatReader(aDecoder, new mp3::MP3Demuxer(aDecoder->GetResource()));
+ decoderReader = new MediaFormatReader(aDecoder, new MP3Demuxer(aDecoder->GetResource()));
} else
if (IsAACSupportedType(aType)) {
decoderReader = new MediaFormatReader(aDecoder, new ADTSDemuxer(aDecoder->GetResource()));
@@ -480,22 +387,10 @@ MediaDecoderReader* DecoderTraits::CreateReader(const nsACString& aType, Abstrac
if (IsOggSupportedType(aType)) {
decoderReader = new MediaFormatReader(aDecoder, new OggDemuxer(aDecoder->GetResource()));
} else
-#ifdef MOZ_ANDROID_OMX
- if (MediaDecoder::IsAndroidMediaPluginEnabled() &&
- EnsureAndroidMediaPluginHost()->FindDecoder(aType, nullptr)) {
- decoderReader = new AndroidMediaReader(aDecoder, aType);
- } else
-#endif
if (IsWebMSupportedType(aType)) {
decoderReader =
new MediaFormatReader(aDecoder, new WebMDemuxer(aDecoder->GetResource()));
- } else
-#ifdef MOZ_DIRECTSHOW
- if (IsDirectShowSupportedType(aType)) {
- decoderReader = new DirectShowReader(aDecoder);
- } else
-#endif
- if (false) {} // dummy if to take care of the dangling else
+ }
return decoderReader;
}
@@ -514,18 +409,12 @@ bool DecoderTraits::IsSupportedInVideoDocument(const nsACString& aType)
return
IsOggSupportedType(aType) ||
IsWebMSupportedType(aType) ||
-#ifdef MOZ_ANDROID_OMX
- (MediaDecoder::IsAndroidMediaPluginEnabled() && IsAndroidMediaType(aType)) ||
-#endif
#ifdef MOZ_FMP4
IsMP4SupportedType(aType, /* DecoderDoctorDiagnostics* */ nullptr) ||
#endif
IsMP3SupportedType(aType) ||
IsAACSupportedType(aType) ||
IsFlacSupportedType(aType) ||
-#ifdef MOZ_DIRECTSHOW
- IsDirectShowSupportedType(aType) ||
-#endif
false;
}
diff --git a/dom/media/GraphDriver.cpp b/dom/media/GraphDriver.cpp
index cae15eb8c..37c692a4b 100644
--- a/dom/media/GraphDriver.cpp
+++ b/dom/media/GraphDriver.cpp
@@ -200,7 +200,7 @@ public:
STREAM_LOG(LogLevel::Debug, ("Starting system thread"));
profiler_register_thread("MediaStreamGraph", &aLocal);
LIFECYCLE_LOG("Starting a new system driver for graph %p\n",
- mDriver->mGraphImpl);
+ mDriver->mGraphImpl.get());
RefPtr<GraphDriver> previousDriver;
{
@@ -236,7 +236,7 @@ private:
void
ThreadedDriver::Start()
{
- LIFECYCLE_LOG("Starting thread for a SystemClockDriver %p\n", mGraphImpl);
+ LIFECYCLE_LOG("Starting thread for a SystemClockDriver %p\n", mGraphImpl.get());
Unused << NS_WARN_IF(mThread);
if (!mThread) { // Ensure we haven't already started it
nsCOMPtr<nsIRunnable> event = new MediaStreamGraphInitThreadRunnable(this);
@@ -623,16 +623,12 @@ AudioCallbackDriver::Init()
cubeb* cubebContext = CubebUtils::GetCubebContext();
if (!cubebContext) {
NS_WARNING("Could not get cubeb context.");
- if (!mFromFallback) {
- CubebUtils::ReportCubebStreamInitFailure(true);
- }
return;
}
cubeb_stream_params output;
cubeb_stream_params input;
uint32_t latency_frames;
- bool firstStream = CubebUtils::GetFirstStream();
MOZ_ASSERT(!NS_IsMainThread(),
"This is blocking and should never run on the main thread.");
@@ -710,18 +706,11 @@ AudioCallbackDriver::Init()
NS_WARNING_ASSERTION(
rv == CUBEB_OK,
"Could not set the audio stream volume in GraphDriver.cpp");
- CubebUtils::ReportCubebBackendUsed();
} else {
#ifdef MOZ_WEBRTC
StaticMutexAutoUnlock unlock(AudioInputCubeb::Mutex());
#endif
NS_WARNING("Could not create a cubeb stream for MediaStreamGraph, falling back to a SystemClockDriver");
- // Only report failures when we're not coming from a driver that was
- // created itself as a fallback driver because of a previous audio driver
- // failure.
- if (!mFromFallback) {
- CubebUtils::ReportCubebStreamInitFailure(firstStream);
- }
// Fall back to a driver using a normal thread. If needed,
// the graph will try to re-open an audio stream later.
MonitorAutoLock lock(GraphImpl()->GetMonitor());
@@ -761,7 +750,7 @@ AudioCallbackDriver::Destroy()
void
AudioCallbackDriver::Resume()
{
- STREAM_LOG(LogLevel::Debug, ("Resuming audio threads for MediaStreamGraph %p", mGraphImpl));
+ STREAM_LOG(LogLevel::Debug, ("Resuming audio threads for MediaStreamGraph %p", mGraphImpl.get()));
if (cubeb_stream_start(mAudioStream) != CUBEB_OK) {
NS_WARNING("Could not start cubeb stream for MSG.");
}
@@ -830,7 +819,9 @@ AudioCallbackDriver::Revive()
mGraphImpl->SetCurrentDriver(NextDriver());
NextDriver()->Start();
} else {
- STREAM_LOG(LogLevel::Debug, ("Starting audio threads for MediaStreamGraph %p from a new thread.", mGraphImpl));
+ STREAM_LOG(LogLevel::Debug,
+ ("Starting audio threads for MediaStreamGraph %p from a new thread.",
+ mGraphImpl.get()));
RefPtr<AsyncCubebTask> initEvent =
new AsyncCubebTask(this, AsyncCubebOperation::INIT);
initEvent->Dispatch();
diff --git a/dom/media/GraphDriver.h b/dom/media/GraphDriver.h
index 411e175d3..bb4f2689b 100644
--- a/dom/media/GraphDriver.h
+++ b/dom/media/GraphDriver.h
@@ -210,10 +210,8 @@ protected:
// Time of the end of this graph iteration. This must be accessed while having
// the monitor.
GraphTime mIterationEnd;
- // The MediaStreamGraphImpl that owns this driver. This has a lifetime longer
- // than the driver, and will never be null. Hence, it can be accesed without
- // monitor.
- MediaStreamGraphImpl* mGraphImpl;
+ // The MediaStreamGraphImpl associated with this driver.
+ const RefPtr<MediaStreamGraphImpl> mGraphImpl;
// This enum specifies the wait state of the driver.
enum WaitState {
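The GraphDriver change above swaps a raw back-pointer for a strong `const RefPtr<>`, so the driver itself keeps the graph alive (which is also why the GraphDriver.cpp hunks add `.get()` where a raw pointer is needed for `%p` logging). A rough standalone sketch of that ownership pattern is below; it uses std::shared_ptr as a stand-in for RefPtr, and the Graph/Driver names are invented for the illustration.

```cpp
#include <cstdio>
#include <memory>
#include <utility>

struct Graph {
  void Iterate() { std::printf("graph iteration\n"); }
};

// The driver holds a strong, immutable reference to its graph, so the graph
// cannot be destroyed while a driver still points at it.
class Driver {
public:
  explicit Driver(std::shared_ptr<Graph> graph) : mGraph(std::move(graph)) {}

  void RunIteration()
  {
    // Logging with %p still needs the raw pointer, hence .get().
    std::printf("driver for graph %p\n", static_cast<void*>(mGraph.get()));
    mGraph->Iterate();
  }

private:
  const std::shared_ptr<Graph> mGraph;
};

int main()
{
  auto graph = std::make_shared<Graph>();
  Driver driver(graph);
  graph.reset();         // The driver's own reference keeps the graph alive.
  driver.RunIteration();
  return 0;
}
```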
diff --git a/dom/media/MP3FrameParser.cpp b/dom/media/MP3FrameParser.cpp
deleted file mode 100644
index 242e3df00..000000000
--- a/dom/media/MP3FrameParser.cpp
+++ /dev/null
@@ -1,591 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include <algorithm>
-
-#include "nsMemory.h"
-#include "MP3FrameParser.h"
-#include "VideoUtils.h"
-
-
-#define FROM_BIG_ENDIAN(X) ((uint32_t)((uint8_t)(X)[0] << 24 | (uint8_t)(X)[1] << 16 | \
- (uint8_t)(X)[2] << 8 | (uint8_t)(X)[3]))
-
-
-namespace mozilla {
-
-/*
- * Following code taken from http://www.hydrogenaudio.org/forums/index.php?showtopic=85125
- * with permission from the author, Nick Wallette <sirnickity@gmail.com>.
- */
-
-/* BEGIN shameless copy and paste */
-
-// Bitrates - use [version][layer][bitrate]
-const uint16_t mpeg_bitrates[4][4][16] = {
- { // Version 2.5
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // Reserved
- { 0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160, 0 }, // Layer 3
- { 0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160, 0 }, // Layer 2
- { 0, 32, 48, 56, 64, 80, 96, 112, 128, 144, 160, 176, 192, 224, 256, 0 } // Layer 1
- },
- { // Reserved
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // Invalid
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // Invalid
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // Invalid
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } // Invalid
- },
- { // Version 2
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // Reserved
- { 0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160, 0 }, // Layer 3
- { 0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160, 0 }, // Layer 2
- { 0, 32, 48, 56, 64, 80, 96, 112, 128, 144, 160, 176, 192, 224, 256, 0 } // Layer 1
- },
- { // Version 1
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // Reserved
- { 0, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 0 }, // Layer 3
- { 0, 32, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384, 0 }, // Layer 2
- { 0, 32, 64, 96, 128, 160, 192, 224, 256, 288, 320, 352, 384, 416, 448, 0 }, // Layer 1
- }
-};
-
-// Sample rates - use [version][srate]
-const uint16_t mpeg_srates[4][4] = {
- { 11025, 12000, 8000, 0 }, // MPEG 2.5
- { 0, 0, 0, 0 }, // Reserved
- { 22050, 24000, 16000, 0 }, // MPEG 2
- { 44100, 48000, 32000, 0 } // MPEG 1
-};
-
-// Samples per frame - use [version][layer]
-const uint16_t mpeg_frame_samples[4][4] = {
-// Rsvd 3 2 1 < Layer v Version
- { 0, 576, 1152, 384 }, // 2.5
- { 0, 0, 0, 0 }, // Reserved
- { 0, 576, 1152, 384 }, // 2
- { 0, 1152, 1152, 384 } // 1
-};
-
-// Slot size (MPEG unit of measurement) - use [layer]
-const uint8_t mpeg_slot_size[4] = { 0, 1, 1, 4 }; // Rsvd, 3, 2, 1
-
-uint16_t
-MP3Frame::CalculateLength()
-{
- // Lookup real values of these fields
- uint32_t bitrate = mpeg_bitrates[mVersion][mLayer][mBitrate] * 1000;
- uint32_t samprate = mpeg_srates[mVersion][mSampleRate];
- uint16_t samples = mpeg_frame_samples[mVersion][mLayer];
- uint8_t slot_size = mpeg_slot_size[mLayer];
-
- // In-between calculations
- float bps = (float)samples / 8.0;
- float fsize = ( (bps * (float)bitrate) / (float)samprate )
- + ( (mPad) ? slot_size : 0 );
-
- // Frame sizes are truncated integers
- return (uint16_t)fsize;
-}
-
-/* END shameless copy and paste */
-
-
-/** MP3Parser methods **/
-
-MP3Parser::MP3Parser()
- : mCurrentChar(0)
-{ }
-
-void
-MP3Parser::Reset()
-{
- mCurrentChar = 0;
-}
-
-uint16_t
-MP3Parser::ParseFrameLength(uint8_t ch)
-{
- mData.mRaw[mCurrentChar] = ch;
-
- MP3Frame &frame = mData.mFrame;
-
- // Validate MP3 header as we read. We can't mistake the start of an MP3 frame
- // for the middle of another frame due to the sync byte at the beginning
- // of the frame.
-
- // The only valid position for an all-high byte is the sync byte at the
- // beginning of the frame.
- if (ch == 0xff) {
- mCurrentChar = 0;
- }
-
- // Make sure the current byte is valid in context. If not, reset the parser.
- if (mCurrentChar == 2) {
- if (frame.mBitrate == 0x0f) {
- goto fail;
- }
- } else if (mCurrentChar == 1) {
- if (frame.mSync2 != 0x07
- || frame.mVersion == 0x01
- || frame.mLayer == 0x00) {
- goto fail;
- }
- }
-
- // The only valid character at the beginning of the header is 0xff. Fail if
- // it's different.
- if (mCurrentChar == 0 && frame.mSync1 != 0xff) {
- // Couldn't find the sync byte. Fail.
- return 0;
- }
-
- mCurrentChar++;
- MOZ_ASSERT(mCurrentChar <= sizeof(MP3Frame));
-
- // Don't have a full header yet.
- if (mCurrentChar < sizeof(MP3Frame)) {
- return 0;
- }
-
- // Woo, valid header. Return the length.
- mCurrentChar = 0;
- return frame.CalculateLength();
-
-fail:
- Reset();
- return 0;
-}
-
-uint32_t
-MP3Parser::GetSampleRate()
-{
- MP3Frame &frame = mData.mFrame;
- return mpeg_srates[frame.mVersion][frame.mSampleRate];
-}
-
-uint32_t
-MP3Parser::GetSamplesPerFrame()
-{
- MP3Frame &frame = mData.mFrame;
- return mpeg_frame_samples[frame.mVersion][frame.mLayer];
-}
-
-
-/** ID3Parser methods **/
-
-const char sID3Head[3] = { 'I', 'D', '3' };
-const uint32_t ID3_HEADER_LENGTH = 10;
-const uint32_t ID3_FOOTER_LENGTH = 10;
-const uint8_t ID3_FOOTER_PRESENT = 0x10;
-
-ID3Parser::ID3Parser()
- : mCurrentChar(0)
- , mVersion(0)
- , mFlags(0)
- , mHeaderLength(0)
-{ }
-
-void
-ID3Parser::Reset()
-{
- mCurrentChar = mVersion = mFlags = mHeaderLength = 0;
-}
-
-bool
-ID3Parser::ParseChar(char ch)
-{
- switch (mCurrentChar) {
- // The first three bytes of an ID3v2 header must match the string "ID3".
- case 0: case 1: case 2:
- if (ch != sID3Head[mCurrentChar]) {
- goto fail;
- }
- break;
- // The fourth and fifth bytes give the version, between 2 and 4.
- case 3:
- if (ch < '\2' || ch > '\4') {
- goto fail;
- }
- mVersion = uint8_t(ch);
- break;
- case 4:
- if (ch != '\0') {
- goto fail;
- }
- break;
- // The sixth byte gives the flags; valid flags depend on the version.
- case 5:
- if ((ch & (0xff >> mVersion)) != '\0') {
- goto fail;
- }
- mFlags = uint8_t(ch);
- break;
- // Bytes seven through ten give the sum of the byte length of the extended
- // header, the padding and the frames after unsynchronisation.
- // These bytes form a 28-bit integer, with the high bit of each byte unset.
- case 6: case 7: case 8: case 9:
- if (ch & 0x80) {
- goto fail;
- }
- mHeaderLength <<= 7;
- mHeaderLength |= ch;
- if (mCurrentChar == 9) {
- mHeaderLength += ID3_HEADER_LENGTH;
- mHeaderLength += (mFlags & ID3_FOOTER_PRESENT) ? ID3_FOOTER_LENGTH : 0;
- }
- break;
- default:
- MOZ_CRASH("Header already fully parsed!");
- }
-
- mCurrentChar++;
-
- return IsParsed();
-
-fail:
- if (mCurrentChar) {
- Reset();
- return ParseChar(ch);
- }
- Reset();
- return false;
-}
-
-bool
-ID3Parser::IsParsed() const
-{
- return mCurrentChar >= ID3_HEADER_LENGTH;
-}
-
-uint32_t
-ID3Parser::GetHeaderLength() const
-{
- MOZ_ASSERT(IsParsed(),
- "Queried length of ID3 header before parsing finished.");
- return mHeaderLength;
-}
-
-
-/** VBR header helper stuff **/
-
-// Helper function to find a VBR header in an MP3 frame.
-// Based on information from
-// http://www.codeproject.com/Articles/8295/MPEG-Audio-Frame-Header
-
-const uint32_t VBRI_TAG = FROM_BIG_ENDIAN("VBRI");
-const uint32_t VBRI_OFFSET = 32 - sizeof(MP3Frame);
-const uint32_t VBRI_FRAME_COUNT_OFFSET = VBRI_OFFSET + 14;
-const uint32_t VBRI_MIN_FRAME_SIZE = VBRI_OFFSET + 26;
-
-const uint32_t XING_TAG = FROM_BIG_ENDIAN("Xing");
-enum XingFlags {
- XING_HAS_NUM_FRAMES = 0x01,
- XING_HAS_NUM_BYTES = 0x02,
- XING_HAS_TOC = 0x04,
- XING_HAS_VBR_SCALE = 0x08
-};
-
-static int64_t
-ParseXing(const char *aBuffer)
-{
- uint32_t flags = FROM_BIG_ENDIAN(aBuffer + 4);
-
- if (!(flags & XING_HAS_NUM_FRAMES)) {
- NS_WARNING("VBR file without frame count. Duration estimation likely to "
- "be totally wrong.");
- return -1;
- }
-
- int64_t numFrames = -1;
- if (flags & XING_HAS_NUM_FRAMES) {
- numFrames = FROM_BIG_ENDIAN(aBuffer + 8);
- }
-
- return numFrames;
-}
-
-static int64_t
-FindNumVBRFrames(const nsCString& aFrame)
-{
- const char *buffer = aFrame.get();
- const char *bufferEnd = aFrame.get() + aFrame.Length();
-
- // VBRI header is nice and well-defined; let's try to find that first.
- if (aFrame.Length() > VBRI_MIN_FRAME_SIZE &&
- FROM_BIG_ENDIAN(buffer + VBRI_OFFSET) == VBRI_TAG) {
- return FROM_BIG_ENDIAN(buffer + VBRI_FRAME_COUNT_OFFSET);
- }
-
- // We have to search for the Xing header as its position can change.
- for (; buffer + sizeof(XING_TAG) < bufferEnd; buffer++) {
- if (FROM_BIG_ENDIAN(buffer) == XING_TAG) {
- return ParseXing(buffer);
- }
- }
-
- return -1;
-}
-
-
-/** MP3FrameParser methods **/
-
-// Some MP3's have large ID3v2 tags, up to 150KB, so we allow lots of
-// skipped bytes to be read, just in case, before we give up and assume
-// we're not parsing an MP3 stream.
-static const uint32_t MAX_SKIPPED_BYTES = 4096;
-
-enum {
- MP3_HEADER_LENGTH = 4,
-};
-
-MP3FrameParser::MP3FrameParser(int64_t aLength)
-: mLock("MP3FrameParser.mLock"),
- mTotalID3Size(0),
- mTotalFrameSize(0),
- mFrameCount(0),
- mOffset(0),
- mLength(aLength),
- mMP3Offset(-1),
- mSamplesPerSecond(0),
- mFirstFrameEnd(-1),
- mIsMP3(MAYBE_MP3)
-{ }
-
-nsresult MP3FrameParser::ParseBuffer(const uint8_t* aBuffer,
- uint32_t aLength,
- int64_t aStreamOffset,
- uint32_t* aOutBytesRead)
-{
- // Iterate forwards over the buffer, looking for ID3 tag, or MP3
- // Frame headers.
- const uint8_t *buffer = aBuffer;
- const uint8_t *bufferEnd = aBuffer + aLength;
-
- // If we haven't found any MP3 frame data yet, there might be ID3 headers
- // we can skip over.
- if (mMP3Offset < 0) {
- for (const uint8_t *ch = buffer; ch < bufferEnd; ch++) {
- if (mID3Parser.ParseChar(*ch)) {
- // Found an ID3 header. We don't care about the body of the header, so
- // just skip past.
- buffer = ch + mID3Parser.GetHeaderLength() - (ID3_HEADER_LENGTH - 1);
-
- if (buffer <= ch) {
- return NS_ERROR_FAILURE;
- }
-
- ch = buffer;
-
- mTotalID3Size += mID3Parser.GetHeaderLength();
-
- // Yes, this is an MP3!
- mIsMP3 = DEFINITELY_MP3;
-
- mID3Parser.Reset();
- }
- }
- }
-
- // The first MP3 frame in a variable bitrate stream can contain metadata
- // for duration estimation and seeking, so we buffer that first frame here.
- if (aStreamOffset < mFirstFrameEnd) {
- uint64_t copyLen = std::min((int64_t)aLength, mFirstFrameEnd - aStreamOffset);
- mFirstFrame.Append((const char *)buffer, copyLen);
- buffer += copyLen;
- }
-
- while (buffer < bufferEnd) {
- uint16_t frameLen = mMP3Parser.ParseFrameLength(*buffer);
-
- if (frameLen) {
- // We've found an MP3 frame!
- // This is the first frame (and the only one we'll bother parsing), so:
- // * Mark this stream as MP3;
- // * Store the offset at which the MP3 data started; and
- // * Start buffering the frame, as it might contain handy metadata.
-
- // We're now sure this is an MP3 stream.
- mIsMP3 = DEFINITELY_MP3;
-
- // We need to know these to convert the number of frames in the stream
- // to the length of the stream in seconds.
- mSamplesPerSecond = mMP3Parser.GetSampleRate();
- mSamplesPerFrame = mMP3Parser.GetSamplesPerFrame();
-
- // If the stream has a constant bitrate, we should only need the length
- // of the first frame and the length (in bytes) of the stream to
- // estimate the length (in seconds).
- mTotalFrameSize += frameLen;
- mFrameCount++;
-
- // If |mMP3Offset| isn't set then this is the first MP3 frame we have
- // seen in the stream, which is useful for duration estimation.
- if (mMP3Offset > -1) {
- uint16_t skip = frameLen - sizeof(MP3Frame);
- buffer += skip ? skip : 1;
- continue;
- }
-
- // Remember the offset of the MP3 stream.
- // We're at the last byte of an MP3Frame, so MP3 data started
- // sizeof(MP3Frame) - 1 bytes ago.
- mMP3Offset = aStreamOffset
- + (buffer - aBuffer)
- - (sizeof(MP3Frame) - 1);
-
- buffer++;
-
- // If the stream has a variable bitrate, the first frame has metadata
- // we need for duration estimation and seeking. Start buffering it so we
- // can parse it later.
- mFirstFrameEnd = mMP3Offset + frameLen;
- uint64_t currOffset = buffer - aBuffer + aStreamOffset;
- uint64_t copyLen = std::min(mFirstFrameEnd - currOffset,
- (uint64_t)(bufferEnd - buffer));
- mFirstFrame.Append((const char *)buffer, copyLen);
-
- buffer += copyLen;
-
- } else {
- // Nothing to see here. Move along.
- buffer++;
- }
- }
-
- *aOutBytesRead = buffer - aBuffer;
-
- if (mFirstFrameEnd > -1 && mFirstFrameEnd <= aStreamOffset + buffer - aBuffer) {
- // We have our whole first frame. Try to find a VBR header.
- mNumFrames = FindNumVBRFrames(mFirstFrame);
- mFirstFrameEnd = -1;
- }
-
- return NS_OK;
-}
-
-void MP3FrameParser::Parse(const uint8_t* aBuffer, uint32_t aLength, uint64_t aOffset)
-{
- MutexAutoLock mon(mLock);
-
- if (HasExactDuration()) {
- // We know the duration; nothing to do here.
- return;
- }
-
- const uint8_t* buffer = aBuffer;
- int32_t length = aLength;
- uint64_t offset = aOffset;
-
- // Got some data we have seen already. Skip forward to what we need.
- if (aOffset < mOffset) {
- buffer += mOffset - aOffset;
- length -= mOffset - aOffset;
- offset = mOffset;
-
- if (length <= 0) {
- return;
- }
- }
-
- // If there is a discontinuity in the input stream, reset the state of the
- // parsers so we don't get any partial headers.
- if (mOffset < aOffset) {
- if (!mID3Parser.IsParsed()) {
- // Only reset this if it hasn't finished yet.
- mID3Parser.Reset();
- }
-
- if (mFirstFrameEnd > -1) {
- NS_WARNING("Discontinuity in input while buffering first frame.");
- mFirstFrameEnd = -1;
- }
-
- mMP3Parser.Reset();
- }
-
- uint32_t bytesRead = 0;
- if (NS_FAILED(ParseBuffer(buffer,
- length,
- offset,
- &bytesRead))) {
- return;
- }
-
- MOZ_ASSERT(length <= (int)bytesRead, "All bytes should have been consumed");
-
- // Update next data offset
- mOffset = offset + bytesRead;
-
- // If we've parsed lots of data and we still have nothing, just give up.
- // We don't count ID3 headers towards the skipped bytes count, as MP3 files
- // can have massive ID3 sections.
- if (!mID3Parser.IsParsed() && mMP3Offset < 0 &&
- mOffset - mTotalID3Size > MAX_SKIPPED_BYTES) {
- mIsMP3 = NOT_MP3;
- }
-}
-
-int64_t MP3FrameParser::GetDuration()
-{
- MutexAutoLock mon(mLock);
-
- if (!ParsedHeaders() || !mSamplesPerSecond) {
- // Not a single frame decoded yet.
- return -1;
- }
-
- MOZ_ASSERT(mFrameCount > 0 && mTotalFrameSize > 0,
- "Frame parser should have seen at least one MP3 frame of positive length.");
-
- if (!mFrameCount || !mTotalFrameSize) {
- // This should never happen.
- return -1;
- }
-
- double frames;
- if (mNumFrames < 0) {
- // Estimate the number of frames in the stream based on the average frame
- // size and the length of the MP3 file.
- double frameSize = (double)mTotalFrameSize / mFrameCount;
- frames = (double)(mLength - mMP3Offset) / frameSize;
- } else {
- // We know the exact number of frames from the VBR header.
- frames = mNumFrames;
- }
-
- // The duration of each frame is constant over a given stream.
- double usPerFrame = USECS_PER_S * mSamplesPerFrame / mSamplesPerSecond;
-
- return frames * usPerFrame;
-}
-
-int64_t MP3FrameParser::GetMP3Offset()
-{
- MutexAutoLock mon(mLock);
- return mMP3Offset;
-}
-
-bool MP3FrameParser::ParsedHeaders()
-{
- // We have seen both the beginning and the end of the first MP3 frame in the
- // stream.
- return mMP3Offset > -1 && mFirstFrameEnd < 0;
-}
-
-bool MP3FrameParser::HasExactDuration()
-{
- return ParsedHeaders() && mNumFrames > -1;
-}
-
-bool MP3FrameParser::NeedsData()
-{
- // If we don't know the duration exactly then either:
- // - we're still waiting for a VBR header; or
- // - we look at all frames to constantly update our duration estimate.
- return IsMP3() && !HasExactDuration();
-}
-
-} // namespace mozilla
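The removed MP3Frame::CalculateLength() computes a frame's byte length as samples-per-frame / 8 * bitrate / sample-rate, plus one slot when the padding bit is set, truncated to an integer. Below is a minimal standalone sketch of that arithmetic for the MPEG-1 Layer III case (1152 samples per frame, 1-byte slots); it only illustrates the formula and is not the removed parser.

```cpp
#include <cstdint>
#include <cstdio>

// Frame size formula used by the removed MP3Frame::CalculateLength():
// floor(samplesPerFrame / 8 * bitrate / sampleRate) + padding * slotSize.
// Constants assume MPEG-1 Layer III: 1152 samples per frame, 1-byte slots.
static uint32_t Mp3FrameBytes(uint32_t bitrateBps, uint32_t sampleRateHz, bool padded)
{
  const uint32_t kSamplesPerFrame = 1152;
  const uint32_t kSlotSize = 1;
  uint32_t size = kSamplesPerFrame / 8 * bitrateBps / sampleRateHz;
  return size + (padded ? kSlotSize : 0);
}

int main()
{
  // 128 kbps at 44.1 kHz: 144 * 128000 / 44100 = 417 bytes (418 when padded).
  std::printf("%u\n", Mp3FrameBytes(128000, 44100, false)); // 417
  std::printf("%u\n", Mp3FrameBytes(128000, 44100, true));  // 418
  return 0;
}
```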
diff --git a/dom/media/MP3FrameParser.h b/dom/media/MP3FrameParser.h
deleted file mode 100644
index d2ba791fd..000000000
--- a/dom/media/MP3FrameParser.h
+++ /dev/null
@@ -1,219 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#ifndef MP3FrameParser_h
-#define MP3FrameParser_h
-
-#include <stdint.h>
-
-#include "mozilla/Mutex.h"
-#include "nsString.h"
-#include "Intervals.h"
-
-namespace mozilla {
-
-// Simple parser to tell whether we've found an ID3 header and how long it is,
-// so that we can skip it.
-// XXX maybe actually parse this stuff?
-class ID3Parser
-{
-public:
- ID3Parser();
-
- void Reset();
- bool ParseChar(char ch);
- bool IsParsed() const;
- uint32_t GetHeaderLength() const;
-
-private:
- uint32_t mCurrentChar;
- uint8_t mVersion;
- uint8_t mFlags;
- uint32_t mHeaderLength;
-};
-
-struct MP3Frame {
- uint16_t mSync1 : 8; // Always all set
- uint16_t mProtected : 1; // Ignored
- uint16_t mLayer : 2;
- uint16_t mVersion : 2;
- uint16_t mSync2 : 3; // Always all set
- uint16_t mPrivate : 1; // Ignored
- uint16_t mPad : 1;
- uint16_t mSampleRate : 2; // Index into mpeg_srates above
- uint16_t mBitrate : 4; // Index into mpeg_bitrates above
-
- uint16_t CalculateLength();
-};
-
-// Buffering parser for MP3 frames.
-class MP3Parser
-{
-public:
- MP3Parser();
-
- // Forget all data the parser has seen so far.
- void Reset();
-
- // Parse the given byte. If we have found a frame header, return the length of
- // the frame.
- uint16_t ParseFrameLength(uint8_t ch);
-
- // Get the sample rate from the current header.
- uint32_t GetSampleRate();
-
- // Get the number of samples per frame.
- uint32_t GetSamplesPerFrame();
-
-private:
- uint32_t mCurrentChar;
- union {
- uint8_t mRaw[3];
- MP3Frame mFrame;
- } mData;
-};
-
-
-// A description of the MP3 format and its extensions is available at
-//
-// http://www.codeproject.com/Articles/8295/MPEG-Audio-Frame-Header
-//
-// The data in MP3 streams is split into small frames, with each frame
-// containing a fixed number of samples. The duration of a frame depends
-// on the frame's bit rate and sample rate. Both values can vary among
-// frames, so it is necessary to examine each individual frame of an MP3
-// stream to calculate the stream's overall duration.
-//
-// The MP3 frame parser extracts information from an MP3 data stream. It
-// accepts a range of frames of an MP3 stream as input, and parses all
-// frames for their duration. Callers can query the stream's overall
-// duration from the parser.
-//
-// Call the methods NotifyDataArrived or Parse to add new data. If you added
-// information for a certain stream position, you cannot go back to previous
-// positions. The parser will simply ignore the input. If you skip stream
-// positions, the duration of the related MP3 frames will be estimated from
-// the stream's average.
-//
-// The method GetDuration returns calculated duration of the stream, including
-// estimates for skipped ranges.
-//
-// All public methods are thread-safe.
-
-class MP3FrameParser
-{
-public:
- explicit MP3FrameParser(int64_t aLength=-1);
-
- bool IsMP3() {
- MutexAutoLock mon(mLock);
- return mIsMP3 != NOT_MP3;
- }
-
- void Parse(const uint8_t* aBuffer, uint32_t aLength, uint64_t aStreamOffset);
-
- // Returns the duration, in microseconds. If the entire stream has not
- // been parsed yet, this is an estimate based on the bitrate of the
- // frames parsed so far.
- int64_t GetDuration();
-
- // Returns the offset of the first MP3 frame in the stream, or -1 of
- // no MP3 frame has been detected yet.
- int64_t GetMP3Offset();
-
- // Returns true if we've seen the whole first frame of the MP3 stream, and
- // therefore can make an estimate on the stream duration.
- // Otherwise, returns false.
- bool ParsedHeaders();
-
- // Returns true if we know the exact duration of the MP3 stream;
- // false otherwise.
- bool HasExactDuration();
-
- // Returns true if the parser needs more data for duration estimation.
- bool NeedsData();
- // Assign the total lenght of this mp3 stream
- void SetLength(int64_t aLength) {
- MutexAutoLock mon(mLock);
- mLength = aLength;
- }
-private:
-
- // Parses aBuffer, starting at offset 0. Returns the number of bytes
- // parsed, relative to the start of the buffer. Note this may be
- // greater than aLength if the headers in the buffer indicate that
- // the frame or ID3 tag extends outside of aBuffer. Returns failure
- // if too many non-MP3 bytes are parsed.
- nsresult ParseBuffer(const uint8_t* aBuffer,
- uint32_t aLength,
- int64_t aStreamOffset,
- uint32_t* aOutBytesRead);
-
- // A low-contention lock for protecting the parser results
- Mutex mLock;
-
- // ID3 header parser. Keeps state between reads in case the header falls
- // in between.
- ID3Parser mID3Parser;
-
- // MP3 frame header parser.
- MP3Parser mMP3Parser;
-
- // If we read |MAX_SKIPPED_BYTES| from the stream without finding any MP3
- // frames, we give up and report |NOT_MP3|. Here we track the cumulative size
- // of any ID3 headers we've seen so big ID3 sections aren't counted towards
- // skipped bytes.
- uint32_t mTotalID3Size;
-
- // All fields below are protected by mLock
-
- // We keep stats on the size of all the frames we've seen, as well as how many
- // so that we can estimate the duration of the rest of the stream.
- uint64_t mTotalFrameSize;
- uint64_t mFrameCount;
-
- // Offset of the last data parsed. This is the end offset of the last data
- // block parsed, so it's the start offset we expect to get on the next
- // call to Parse().
- uint64_t mOffset;
-
- // Total length of the stream in bytes.
- int64_t mLength;
-
- // Offset of first MP3 frame in the bitstream. Has value -1 until the
- // first MP3 frame is found.
- int64_t mMP3Offset;
-
- // The exact number of frames in this stream, if we know it. -1 otherwise.
- int64_t mNumFrames;
-
- // Number of audio samples per second and per frame. Fixed through the whole
- // file. If we know these variables as well as the number of frames in the
- // file, we can get an exact duration for the stream.
- uint16_t mSamplesPerSecond;
- uint16_t mSamplesPerFrame;
-
- // If the MP3 has a variable bitrate, then there *should* be metadata about
- // the encoding in the first frame. We buffer the first frame here.
- nsCString mFirstFrame;
-
- // While we are reading the first frame, this is the stream offset of the
- // last byte of that frame. -1 at all other times.
- int64_t mFirstFrameEnd;
-
- enum eIsMP3 {
- MAYBE_MP3, // We're giving the stream the benefit of the doubt...
- DEFINITELY_MP3, // We've hit at least one ID3 tag or MP3 frame.
- NOT_MP3 // Not found any evidence of the stream being MP3.
- };
-
- eIsMP3 mIsMP3;
-
-};
-
-} // namespace mozilla
-
-#endif
diff --git a/dom/media/MediaData.h b/dom/media/MediaData.h
index a79aac6ed..905b4c1d9 100644
--- a/dom/media/MediaData.h
+++ b/dom/media/MediaData.h
@@ -14,6 +14,7 @@
#include "nsIMemoryReporter.h"
#include "SharedBuffer.h"
#include "mozilla/RefPtr.h"
+#include "mozilla/Span.h"
#include "mozilla/UniquePtr.h"
#include "mozilla/UniquePtrExtensions.h"
#include "nsTArray.h"
@@ -631,6 +632,8 @@ public:
{
return sizeof(*this) + mBuffer.ComputedSizeOfExcludingThis();
}
+ // Access the buffer as a Span.
+ operator Span<const uint8_t>() { return MakeSpan(Data(), Size()); }
const CryptoSample& mCrypto;
RefPtr<MediaByteBuffer> mExtraData;
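The MediaData.h hunk adds an implicit conversion so a sample buffer can be passed anywhere a read-only Span is expected. The sketch below shows the same pattern in standalone C++20, using std::span in place of mozilla::Span/MakeSpan; the Buffer class and Sum helper are invented for the illustration.

```cpp
#include <cstdint>
#include <cstdio>
#include <span>
#include <vector>

// A buffer type that, like the patched class, exposes its bytes through an
// implicit conversion to a read-only span view.
class Buffer {
public:
  explicit Buffer(std::vector<uint8_t> data) : mData(std::move(data)) {}

  const uint8_t* Data() const { return mData.data(); }
  size_t Size() const { return mData.size(); }

  // Implicit view conversion, analogous to `operator Span<const uint8_t>()`.
  operator std::span<const uint8_t>() const { return {Data(), Size()}; }

private:
  std::vector<uint8_t> mData;
};

static size_t Sum(std::span<const uint8_t> bytes)
{
  size_t total = 0;
  for (uint8_t b : bytes) {
    total += b;
  }
  return total;
}

int main()
{
  Buffer buffer({1, 2, 3, 4});
  // The Buffer converts implicitly where a span parameter is expected.
  std::printf("%zu\n", Sum(buffer)); // 10
  return 0;
}
```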
diff --git a/dom/media/MediaDecoder.cpp b/dom/media/MediaDecoder.cpp
index 9334d1bcb..223c59c3b 100644
--- a/dom/media/MediaDecoder.cpp
+++ b/dom/media/MediaDecoder.cpp
@@ -30,15 +30,10 @@
#include "mozilla/dom/VideoTrack.h"
#include "mozilla/dom/VideoTrackList.h"
#include "nsPrintfCString.h"
-#include "mozilla/Telemetry.h"
#include "GMPService.h"
#include "Layers.h"
#include "mozilla/layers/ShadowLayers.h"
-#ifdef MOZ_ANDROID_OMX
-#include "AndroidBridge.h"
-#endif
-
using namespace mozilla::dom;
using namespace mozilla::layers;
using namespace mozilla::media;
@@ -425,7 +420,6 @@ MediaDecoder::MediaDecoder(MediaDecoderOwner* aOwner)
, INIT_CANONICAL(mPlaybackRateReliable, true)
, INIT_CANONICAL(mDecoderPosition, 0)
, INIT_CANONICAL(mIsVisible, !aOwner->IsHidden())
- , mTelemetryReported(false)
{
MOZ_COUNT_CTOR(MediaDecoder);
MOZ_ASSERT(NS_IsMainThread());
@@ -845,42 +839,6 @@ MediaDecoder::MetadataLoaded(nsAutoPtr<MediaInfo> aInfo,
// So we call Invalidate() after calling mOwner->MetadataLoaded to ensure
// the media element has the latest dimensions.
Invalidate();
-
- EnsureTelemetryReported();
-}
-
-void
-MediaDecoder::EnsureTelemetryReported()
-{
- MOZ_ASSERT(NS_IsMainThread());
-
- if (mTelemetryReported || !mInfo) {
- // Note: sometimes we get multiple MetadataLoaded calls (for example
- // for chained ogg). So we ensure we don't report duplicate results for
- // these resources.
- return;
- }
-
- nsTArray<nsCString> codecs;
- if (mInfo->HasAudio() && !mInfo->mAudio.GetAsAudioInfo()->mMimeType.IsEmpty()) {
- codecs.AppendElement(mInfo->mAudio.GetAsAudioInfo()->mMimeType);
- }
- if (mInfo->HasVideo() && !mInfo->mVideo.GetAsVideoInfo()->mMimeType.IsEmpty()) {
- codecs.AppendElement(mInfo->mVideo.GetAsVideoInfo()->mMimeType);
- }
- if (codecs.IsEmpty()) {
- if (mResource->GetContentType().IsEmpty()) {
- NS_WARNING("Somehow the resource's content type is empty");
- return;
- }
- codecs.AppendElement(nsPrintfCString("resource; %s", mResource->GetContentType().get()));
- }
- for (const nsCString& codec : codecs) {
- DECODER_LOG("Telemetry MEDIA_CODEC_USED= '%s'", codec.get());
- Telemetry::Accumulate(Telemetry::ID::MEDIA_CODEC_USED, codec);
- }
-
- mTelemetryReported = true;
}
const char*
@@ -1618,16 +1576,6 @@ MediaDecoder::IsWebMEnabled()
return Preferences::GetBool("media.webm.enabled");
}
-#ifdef MOZ_ANDROID_OMX
-bool
-MediaDecoder::IsAndroidMediaPluginEnabled()
-{
- return AndroidBridge::Bridge() &&
- AndroidBridge::Bridge()->GetAPIVersion() < 16 &&
- Preferences::GetBool("media.plugins.enabled");
-}
-#endif
-
NS_IMETHODIMP
MediaMemoryTracker::CollectReports(nsIHandleReportCallback* aHandleReport,
nsISupports* aData, bool aAnonymize)
diff --git a/dom/media/MediaDecoder.h b/dom/media/MediaDecoder.h
index a4edcbe72..05e88db8b 100644
--- a/dom/media/MediaDecoder.h
+++ b/dom/media/MediaDecoder.h
@@ -440,17 +440,11 @@ private:
void SetCDMProxy(CDMProxy* aProxy);
- void EnsureTelemetryReported();
-
static bool IsOggEnabled();
static bool IsOpusEnabled();
static bool IsWaveEnabled();
static bool IsWebMEnabled();
-#ifdef MOZ_ANDROID_OMX
- static bool IsAndroidMediaPluginEnabled();
-#endif
-
#ifdef MOZ_WMF
static bool IsWMFEnabled();
#endif
@@ -856,8 +850,6 @@ private:
// download has ended. Called on the main thread only. aStatus is
// the result from OnStopRequest.
void NotifyDownloadEnded(nsresult aStatus);
-
- bool mTelemetryReported;
};
} // namespace mozilla
diff --git a/dom/media/MediaDecoderStateMachine.cpp b/dom/media/MediaDecoderStateMachine.cpp
index c586139ad..2ed1956c9 100644
--- a/dom/media/MediaDecoderStateMachine.cpp
+++ b/dom/media/MediaDecoderStateMachine.cpp
@@ -1173,60 +1173,12 @@ StateObject::HandleShutdown()
return SetState<ShutdownState>();
}
-static void
-ReportRecoveryTelemetry(const TimeStamp& aRecoveryStart,
- const MediaInfo& aMediaInfo,
- bool aIsHardwareAccelerated)
-{
- MOZ_ASSERT(NS_IsMainThread());
- if (!aMediaInfo.HasVideo()) {
- return;
- }
-
- // Keyed by audio+video or video alone, hardware acceleration,
- // and by a resolution range.
- nsCString key(aMediaInfo.HasAudio() ? "AV" : "V");
- key.AppendASCII(aIsHardwareAccelerated ? "(hw)," : ",");
- static const struct { int32_t mH; const char* mRes; } sResolutions[] = {
- { 240, "0-240" },
- { 480, "241-480" },
- { 720, "481-720" },
- { 1080, "721-1080" },
- { 2160, "1081-2160" }
- };
- const char* resolution = "2161+";
- int32_t height = aMediaInfo.mVideo.mImage.height;
- for (const auto& res : sResolutions) {
- if (height <= res.mH) {
- resolution = res.mRes;
- break;
- }
- }
- key.AppendASCII(resolution);
-
- TimeDuration duration = TimeStamp::Now() - aRecoveryStart;
- double duration_ms = duration.ToMilliseconds();
- Telemetry::Accumulate(Telemetry::VIDEO_SUSPEND_RECOVERY_TIME_MS,
- key,
- uint32_t(duration_ms + 0.5));
- Telemetry::Accumulate(Telemetry::VIDEO_SUSPEND_RECOVERY_TIME_MS,
- NS_LITERAL_CSTRING("All"),
- uint32_t(duration_ms + 0.5));
-}
-
void
MediaDecoderStateMachine::
StateObject::HandleResumeVideoDecoding()
{
MOZ_ASSERT(mMaster->mVideoDecodeSuspended);
- // Start counting recovery time from right now.
- TimeStamp start = TimeStamp::Now();
-
- // Local reference to mInfo, so that it will be copied in the lambda below.
- auto& info = Info();
- bool hw = Reader()->VideoIsHardwareAccelerated();
-
// Start video-only seek to the current time.
SeekJob seekJob;
@@ -1238,10 +1190,7 @@ StateObject::HandleResumeVideoDecoding()
type,
true /* aVideoOnly */);
- SetState<SeekingState>(Move(seekJob), EventVisibility::Suppressed)->Then(
- AbstractThread::MainThread(), __func__,
- [start, info, hw](){ ReportRecoveryTelemetry(start, info, hw); },
- [](){});
+ SetState<SeekingState>(Move(seekJob), EventVisibility::Suppressed);
}
void
diff --git a/dom/media/MediaFormatReader.cpp b/dom/media/MediaFormatReader.cpp
index 06e8b963b..773434710 100644
--- a/dom/media/MediaFormatReader.cpp
+++ b/dom/media/MediaFormatReader.cpp
@@ -32,6 +32,10 @@ using mozilla::layers::Image;
using mozilla::layers::LayerManager;
using mozilla::layers::LayersBackend;
+// avoid redefined macro warning in unified builds
+#undef LOG
+#undef LOGV
+
static mozilla::LazyLogModule sFormatDecoderLog("MediaFormatReader");
mozilla::LazyLogModule gMediaDemuxerLog("MediaDemuxer");
diff --git a/dom/media/MediaManager.cpp b/dom/media/MediaManager.cpp
index ba6b4cd47..979cb64c7 100644
--- a/dom/media/MediaManager.cpp
+++ b/dom/media/MediaManager.cpp
@@ -32,7 +32,6 @@
#include "nsAppDirectoryServiceDefs.h"
#include "nsIInputStream.h"
#include "nsILineInputStream.h"
-#include "mozilla/Telemetry.h"
#include "mozilla/Types.h"
#include "mozilla/PeerIdentity.h"
#include "mozilla/dom/ContentChild.h"
@@ -2034,7 +2033,6 @@ MediaManager::GetUserMedia(nsPIDOMWindowInner* aWindow,
}
// Determine permissions early (while we still have a stack).
-
nsIURI* docURI = aWindow->GetDocumentURI();
if (!docURI) {
return NS_ERROR_UNEXPECTED;
@@ -2044,48 +2042,23 @@ MediaManager::GetUserMedia(nsPIDOMWindowInner* aWindow,
Preferences::GetBool("media.navigator.permission.disabled", false);
bool isHTTPS = false;
docURI->SchemeIs("https", &isHTTPS);
- nsCString host;
- nsresult rv = docURI->GetHost(host);
- // Test for some other schemes that ServiceWorker recognizes
- bool isFile;
- docURI->SchemeIs("file", &isFile);
- bool isApp;
- docURI->SchemeIs("app", &isApp);
- // Same localhost check as ServiceWorkers uses
- // (see IsOriginPotentiallyTrustworthy())
- bool isLocalhost = NS_SUCCEEDED(rv) &&
- (host.LowerCaseEqualsLiteral("localhost") ||
- host.LowerCaseEqualsLiteral("127.0.0.1") ||
- host.LowerCaseEqualsLiteral("::1"));
-
- // Record telemetry about whether the source of the call was secure, i.e.,
- // privileged or HTTPS. We may handle other cases
-if (privileged) {
- Telemetry::Accumulate(Telemetry::WEBRTC_GET_USER_MEDIA_SECURE_ORIGIN,
- (uint32_t) GetUserMediaSecurityState::Privileged);
- } else if (isHTTPS) {
- Telemetry::Accumulate(Telemetry::WEBRTC_GET_USER_MEDIA_SECURE_ORIGIN,
- (uint32_t) GetUserMediaSecurityState::HTTPS);
- } else if (isFile) {
- Telemetry::Accumulate(Telemetry::WEBRTC_GET_USER_MEDIA_SECURE_ORIGIN,
- (uint32_t) GetUserMediaSecurityState::File);
- } else if (isApp) {
- Telemetry::Accumulate(Telemetry::WEBRTC_GET_USER_MEDIA_SECURE_ORIGIN,
- (uint32_t) GetUserMediaSecurityState::App);
- } else if (isLocalhost) {
- Telemetry::Accumulate(Telemetry::WEBRTC_GET_USER_MEDIA_SECURE_ORIGIN,
- (uint32_t) GetUserMediaSecurityState::Localhost);
- } else {
- Telemetry::Accumulate(Telemetry::WEBRTC_GET_USER_MEDIA_SECURE_ORIGIN,
- (uint32_t) GetUserMediaSecurityState::Other);
- }
nsCString origin;
- rv = nsPrincipal::GetOriginForURI(docURI, origin);
+ nsresult rv = nsPrincipal::GetOriginForURI(docURI, origin);
if (NS_WARN_IF(NS_FAILED(rv))) {
return rv;
}
+ // Disallow access to null principal pages
+ nsCOMPtr<nsIPrincipal> principal = aWindow->GetExtantDoc()->NodePrincipal();
+ if (principal->GetIsNullPrincipal()) {
+ RefPtr<MediaStreamError> error =
+ new MediaStreamError(aWindow,
+ NS_LITERAL_STRING("NotAllowedError"));
+ onFailure->OnError(error);
+ return NS_OK;
+ }
+
if (!Preferences::GetBool("media.navigator.video.enabled", true)) {
c.mVideo.SetAsBoolean() = false;
}
@@ -2098,8 +2071,6 @@ if (privileged) {
videoType = StringToEnum(dom::MediaSourceEnumValues::strings,
vc.mMediaSource,
MediaSourceEnum::Other);
- Telemetry::Accumulate(Telemetry::WEBRTC_GET_USER_MEDIA_TYPE,
- (uint32_t) videoType);
switch (videoType) {
case MediaSourceEnum::Camera:
break;
@@ -2182,8 +2153,6 @@ if (privileged) {
ac.mMediaSource.AssignASCII(EnumToASCII(dom::MediaSourceEnumValues::strings,
audioType));
}
- Telemetry::Accumulate(Telemetry::WEBRTC_GET_USER_MEDIA_TYPE,
- (uint32_t) audioType);
switch (audioType) {
case MediaSourceEnum::Microphone:
@@ -2229,7 +2198,6 @@ if (privileged) {
StreamListeners* listeners = AddWindowID(windowID);
// Create a disabled listener to act as a placeholder
- nsIPrincipal* principal = aWindow->GetExtantDoc()->NodePrincipal();
RefPtr<GetUserMediaCallbackMediaStreamListener> listener =
new GetUserMediaCallbackMediaStreamListener(mMediaThread, windowID,
MakePrincipalHandle(principal));
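
Aside from dropping telemetry, the substantive change in this file is the new null-principal guard, and the principal it fetches is the same object the last hunk reuses for the listener's PrincipalHandle, which is why the second NodePrincipal() lookup could be deleted. A condensed, hedged sketch of how the two ends of the patch fit together; everything in between (constraint parsing, device selection) is elided and the variable names are taken from the hunks above:

    // Reject getUserMedia() for documents with a null principal
    // (e.g. sandboxed frames); content sees a NotAllowedError.
    nsCOMPtr<nsIPrincipal> principal = aWindow->GetExtantDoc()->NodePrincipal();
    if (principal->GetIsNullPrincipal()) {
      RefPtr<MediaStreamError> error =
        new MediaStreamError(aWindow, NS_LITERAL_STRING("NotAllowedError"));
      onFailure->OnError(error);
      return NS_OK;
    }

    // (constraint parsing and device selection elided)

    // Later, the same principal is wrapped for the placeholder listener.
    RefPtr<GetUserMediaCallbackMediaStreamListener> listener =
      new GetUserMediaCallbackMediaStreamListener(mMediaThread, windowID,
                                                  MakePrincipalHandle(principal));
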
diff --git a/dom/media/MediaPrefs.h b/dom/media/MediaPrefs.h
index b237ecd3d..c67a89989 100644
--- a/dom/media/MediaPrefs.h
+++ b/dom/media/MediaPrefs.h
@@ -105,9 +105,6 @@ private:
DECL_MEDIA_PREF("media.eme.enabled", EMEEnabled, bool, false);
DECL_MEDIA_PREF("media.use-blank-decoder", PDMUseBlankDecoder, bool, false);
DECL_MEDIA_PREF("media.gpu-process-decoder", PDMUseGPUDecoder, bool, false);
-#ifdef MOZ_GONK_MEDIACODEC
- DECL_MEDIA_PREF("media.gonk.enabled", PDMGonkDecoderEnabled, bool, true);
-#endif
#ifdef MOZ_WIDGET_ANDROID
DECL_MEDIA_PREF("media.android-media-codec.enabled", PDMAndroidMediaCodecEnabled, bool, false);
DECL_MEDIA_PREF("media.android-media-codec.preferred", PDMAndroidMediaCodecPreferred, bool, false);
@@ -120,6 +117,9 @@ private:
#ifdef MOZ_FFVPX
DECL_MEDIA_PREF("media.ffvpx.enabled", PDMFFVPXEnabled, bool, true);
#endif
+#ifdef MOZ_AV1
+ DECL_MEDIA_PREF("media.av1.enabled", AV1Enabled, bool, false);
+#endif
#ifdef XP_WIN
DECL_MEDIA_PREF("media.wmf.enabled", PDMWMFEnabled, bool, true);
DECL_MEDIA_PREF("media.wmf.skip-blacklist", PDMWMFSkipBlacklist, bool, false);
diff --git a/dom/media/MediaResource.cpp b/dom/media/MediaResource.cpp
index d36783be7..84a8d67fd 100644
--- a/dom/media/MediaResource.cpp
+++ b/dom/media/MediaResource.cpp
@@ -1525,7 +1525,7 @@ void BaseMediaResource::SetLoadInBackground(bool aLoadInBackground) {
NS_WARNING("Null owner in MediaResource::SetLoadInBackground()");
return;
}
- dom::HTMLMediaElement* element = owner->GetMediaElement();
+ RefPtr<dom::HTMLMediaElement> element = owner->GetMediaElement();
if (!element) {
NS_WARNING("Null element in MediaResource::SetLoadInBackground()");
return;
diff --git a/dom/media/MediaStreamGraph.cpp b/dom/media/MediaStreamGraph.cpp
index e2934cbb2..1b9e4f674 100644
--- a/dom/media/MediaStreamGraph.cpp
+++ b/dom/media/MediaStreamGraph.cpp
@@ -3371,7 +3371,8 @@ MediaStreamGraphImpl::Destroy()
// First unregister from memory reporting.
UnregisterWeakMemoryReporter(this);
- // Clear the self reference which will destroy this instance.
+ // Clear the self reference which will destroy this instance if all
+ // associated GraphDrivers are destroyed.
mSelfRef = nullptr;
}
diff --git a/dom/media/ThreadPoolCOMListener.h b/dom/media/ThreadPoolCOMListener.h
index 881013a78..424ca65d2 100644
--- a/dom/media/ThreadPoolCOMListener.h
+++ b/dom/media/ThreadPoolCOMListener.h
@@ -13,8 +13,8 @@
namespace mozilla {
// Thread pool listener which ensures that MSCOM is initialized and
-// deinitialized on the thread pool thread. We may call into WMF or
-// DirectShow on this thread, so we need MSCOM working.
+// deinitialized on the thread pool thread. We may call into WMF on
+// this thread, so we need MSCOM working.
class MSCOMInitThreadPoolListener final : public nsIThreadPoolListener {
~MSCOMInitThreadPoolListener() {}
public:
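
With DirectShow removed, WMF is the remaining reason this listener exists. The method bodies are not shown in this hunk, but an nsIThreadPoolListener of this kind conventionally pairs CoInitializeEx/CoUninitialize in its thread callbacks; a hedged sketch of that assumed shape (not copied from the header):

    // (<objbase.h> provides CoInitializeEx/CoUninitialize)
    NS_IMETHOD OnThreadCreated() override
    {
      // WMF requires COM on the calling thread; MTA matches the pool's
      // free-threaded usage.
      HRESULT hr = CoInitializeEx(nullptr, COINIT_MULTITHREADED);
      if (FAILED(hr)) {
        NS_WARNING("Failed to initialize MSCOM on thread pool thread.");
      }
      return NS_OK;
    }

    NS_IMETHOD OnThreadShuttingDown() override
    {
      CoUninitialize();
      return NS_OK;
    }
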
diff --git a/dom/media/VideoFrameContainer.cpp b/dom/media/VideoFrameContainer.cpp
index 2b1965766..56aea9d27 100644
--- a/dom/media/VideoFrameContainer.cpp
+++ b/dom/media/VideoFrameContainer.cpp
@@ -61,7 +61,7 @@ void VideoFrameContainer::UpdatePrincipalHandleForFrameIDLocked(const PrincipalH
mFrameIDForPendingPrincipalHandle = aFrameID;
}
-static void
+static bool
SetImageToBlackPixel(PlanarYCbCrImage* aImage)
{
uint8_t blackPixel[] = { 0x10, 0x80, 0x80 };
@@ -72,7 +72,7 @@ SetImageToBlackPixel(PlanarYCbCrImage* aImage)
data.mCrChannel = blackPixel + 2;
data.mYStride = data.mCbCrStride = 1;
data.mPicSize = data.mYSize = data.mCbCrSize = gfx::IntSize(1, 1);
- aImage->CopyData(data);
+ return aImage->CopyData(data);
}
class VideoFrameContainerInvalidateRunnable : public Runnable {
@@ -122,11 +122,13 @@ void VideoFrameContainer::SetCurrentFrames(const VideoSegment& aSegment)
if (frame->GetForceBlack()) {
if (!mBlackImage) {
- mBlackImage = GetImageContainer()->CreatePlanarYCbCrImage();
- if (mBlackImage) {
+ RefPtr<Image> blackImage = GetImageContainer()->CreatePlanarYCbCrImage();
+ if (blackImage) {
// Sets the image to a single black pixel, which will be scaled to
// fill the rendered size.
- SetImageToBlackPixel(mBlackImage->AsPlanarYCbCrImage());
+ if (SetImageToBlackPixel(blackImage->AsPlanarYCbCrImage())) {
+ mBlackImage = blackImage;
+ }
}
}
if (mBlackImage) {
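
CopyData() allocates the image's backing buffer and can fail under memory pressure; previously a failed copy still left the half-initialized image cached in mBlackImage. The pixel values themselves are limited-range YCbCr, where Y=0x10 with Cb=Cr=0x80 is black. A reduced sketch of the "build into a local, publish only on success" pattern the hunk adopts:

    RefPtr<Image> blackImage = GetImageContainer()->CreatePlanarYCbCrImage();
    if (blackImage && SetImageToBlackPixel(blackImage->AsPlanarYCbCrImage())) {
      // Cache only a fully-initialized image; on failure mBlackImage stays
      // null and the forced-black frame is simply skipped.
      mBlackImage = blackImage;
    }
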
diff --git a/dom/media/VideoUtils.cpp b/dom/media/VideoUtils.cpp
index 5c00e54bc..c06ba9070 100644
--- a/dom/media/VideoUtils.cpp
+++ b/dom/media/VideoUtils.cpp
@@ -6,7 +6,6 @@
#include "mozilla/Base64.h"
#include "mozilla/TaskQueue.h"
-#include "mozilla/Telemetry.h"
#include "mozilla/Function.h"
#include "MediaContentType.h"
@@ -208,8 +207,20 @@ already_AddRefed<SharedThreadPool> GetMediaThreadPool(MediaThreadType aType)
name = "MediaPlayback";
break;
}
- return SharedThreadPool::
+ RefPtr<SharedThreadPool> pool = SharedThreadPool::
Get(nsDependentCString(name), MediaPrefs::MediaThreadPoolDefaultCount());
+
+ // Ensure a larger stack for platform decoder threads
+ if (aType == MediaThreadType::PLATFORM_DECODER) {
+ const uint32_t minStackSize = 512*1024;
+ uint32_t stackSize;
+ MOZ_ALWAYS_SUCCEEDS(pool->GetThreadStackSize(&stackSize));
+ if (stackSize < minStackSize) {
+ MOZ_ALWAYS_SUCCEEDS(pool->SetThreadStackSize(minStackSize));
+ }
+ }
+
+ return pool.forget();
}
bool
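
SharedThreadPool instances are cached by name, so the stack-size bump applies to every consumer of the PLATFORM_DECODER pool, whichever caller creates it first. A hedged usage sketch, assuming the TaskQueue-over-shared-pool pattern used elsewhere in dom/media:

    // Decoder task queues sit on top of the shared pool, so their runnables
    // now run on threads with at least a 512 KiB stack (some codec
    // initialization paths are stack-hungry).
    RefPtr<TaskQueue> decodeQueue =
      new TaskQueue(GetMediaThreadPool(MediaThreadType::PLATFORM_DECODER));
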
@@ -248,24 +259,6 @@ ExtractH264CodecDetails(const nsAString& aCodec,
aLevel *= 10;
}
- // Capture the constraint_set flag value for the purpose of Telemetry.
- // We don't NS_ENSURE_SUCCESS here because ExtractH264CodecDetails doesn't
- // care about this, but we make sure constraints is above 4 (constraint_set5_flag)
- // otherwise collect 0 for unknown.
- uint8_t constraints = PromiseFlatString(Substring(aCodec, 7, 2)).ToInteger(&rv, 16);
- Telemetry::Accumulate(Telemetry::VIDEO_CANPLAYTYPE_H264_CONSTRAINT_SET_FLAG,
- constraints >= 4 ? constraints : 0);
-
- // 244 is the highest meaningful profile value (High 4:4:4 Intra Profile)
- // that can be represented as single hex byte, otherwise collect 0 for unknown.
- Telemetry::Accumulate(Telemetry::VIDEO_CANPLAYTYPE_H264_PROFILE,
- aProfile <= 244 ? aProfile : 0);
-
- // Make sure aLevel represents a value between levels 1 and 5.2,
- // otherwise collect 0 for unknown.
- Telemetry::Accumulate(Telemetry::VIDEO_CANPLAYTYPE_H264_LEVEL,
- (aLevel >= 10 && aLevel <= 52) ? aLevel : 0);
-
return true;
}
@@ -445,6 +438,16 @@ ParseMIMETypeString(const nsAString& aMIMEType,
return ParseCodecsString(codecsStr, aOutCodecs);
}
+template <int N>
+static bool
+StartsWith(const nsACString& string, const char (&prefix)[N])
+{
+ if (N - 1 > string.Length()) {
+ return false;
+ }
+ return memcmp(string.Data(), prefix, N - 1) == 0;
+}
+
bool
IsH264CodecString(const nsAString& aCodec)
{
@@ -477,15 +480,14 @@ IsVP9CodecString(const nsAString& aCodec)
aCodec.EqualsLiteral("vp9.0");
}
-template <int N>
-static bool
-StartsWith(const nsACString& string, const char (&prefix)[N])
+#ifdef MOZ_AV1
+bool
+IsAV1CodecString(const nsAString& aCodec)
{
- if (N - 1 > string.Length()) {
- return false;
- }
- return memcmp(string.Data(), prefix, N - 1) == 0;
+ return aCodec.EqualsLiteral("av1") ||
+ StartsWith(NS_ConvertUTF16toUTF8(aCodec), "av01");
}
+#endif
UniquePtr<TrackInfo>
CreateTrackInfoWithMIMEType(const nsACString& aCodecMIMEType)
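
The StartsWith template moves up so it is defined ahead of the new AV1 predicate that uses it, and IsAV1CodecString() accepts both the short WebM-style name and anything with the ISO-BMFF "av01" prefix. A few hedged examples of what the predicate matches (the long-form string is illustrative, not taken from this patch):

    MOZ_ASSERT(IsAV1CodecString(NS_LITERAL_STRING("av1")));            // short form
    MOZ_ASSERT(IsAV1CodecString(NS_LITERAL_STRING("av01.0.05M.08")));  // "av01" prefix
    MOZ_ASSERT(!IsAV1CodecString(NS_LITERAL_STRING("vp9")));           // unrelated codec
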
diff --git a/dom/media/VideoUtils.h b/dom/media/VideoUtils.h
index 441b63792..aaf0e9903 100644
--- a/dom/media/VideoUtils.h
+++ b/dom/media/VideoUtils.h
@@ -345,6 +345,11 @@ IsVP8CodecString(const nsAString& aCodec);
bool
IsVP9CodecString(const nsAString& aCodec);
+#ifdef MOZ_AV1
+bool
+IsAV1CodecString(const nsAString& aCodec);
+#endif
+
// Try and create a TrackInfo with a given codec MIME type.
UniquePtr<TrackInfo>
CreateTrackInfoWithMIMEType(const nsACString& aCodecMIMEType);
diff --git a/dom/media/WebVTTListener.h b/dom/media/WebVTTListener.h
index 67271664a..461d7f00d 100644
--- a/dom/media/WebVTTListener.h
+++ b/dom/media/WebVTTListener.h
@@ -10,6 +10,7 @@
#include "nsIStreamListener.h"
#include "nsIChannelEventSink.h"
#include "nsIInterfaceRequestor.h"
+#include "nsCOMPtr.h"
#include "nsCycleCollectionParticipant.h"
class nsIWebVTTParserWrapper;
diff --git a/dom/media/android/AndroidMediaDecoder.cpp b/dom/media/android/AndroidMediaDecoder.cpp
deleted file mode 100644
index 41ef3fcb0..000000000
--- a/dom/media/android/AndroidMediaDecoder.cpp
+++ /dev/null
@@ -1,25 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this file,
- * You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include "MediaDecoderStateMachine.h"
-#include "AndroidMediaDecoder.h"
-#include "AndroidMediaReader.h"
-
-namespace mozilla {
-
-AndroidMediaDecoder::AndroidMediaDecoder(MediaDecoderOwner* aOwner,
- const nsACString& aType)
- : MediaDecoder(aOwner), mType(aType)
-{
-}
-
-MediaDecoderStateMachine* AndroidMediaDecoder::CreateStateMachine()
-{
- return new MediaDecoderStateMachine(this, new AndroidMediaReader(this, mType));
-}
-
-} // namespace mozilla
-
diff --git a/dom/media/android/AndroidMediaDecoder.h b/dom/media/android/AndroidMediaDecoder.h
deleted file mode 100644
index 88b5a243f..000000000
--- a/dom/media/android/AndroidMediaDecoder.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this file,
- * You can obtain one at http://mozilla.org/MPL/2.0/. */
-#if !defined(AndroidMediaDecoder_h_)
-#define AndroidMediaDecoder_h_
-
-#include "MediaDecoder.h"
-#include "AndroidMediaDecoder.h"
-
-namespace mozilla {
-
-class AndroidMediaDecoder : public MediaDecoder
-{
- nsCString mType;
-public:
- AndroidMediaDecoder(MediaDecoderOwner* aOwner, const nsACString& aType);
-
- MediaDecoder* Clone(MediaDecoderOwner* aOwner) override {
- return new AndroidMediaDecoder(aOwner, mType);
- }
- MediaDecoderStateMachine* CreateStateMachine() override;
-};
-
-} // namespace mozilla
-
-#endif
diff --git a/dom/media/android/AndroidMediaPluginHost.cpp b/dom/media/android/AndroidMediaPluginHost.cpp
deleted file mode 100644
index d4c4fc59e..000000000
--- a/dom/media/android/AndroidMediaPluginHost.cpp
+++ /dev/null
@@ -1,305 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this file,
- * You can obtain one at http://mozilla.org/MPL/2.0/. */
-#include "mozilla/Preferences.h"
-#include "MediaResource.h"
-#include "mozilla/dom/HTMLMediaElement.h"
-#include "mozilla/Services.h"
-#include "AndroidMediaPluginHost.h"
-#include "nsAutoPtr.h"
-#include "nsXPCOMStrings.h"
-#include "nsISeekableStream.h"
-#include "nsIGfxInfo.h"
-#include "prmem.h"
-#include "prlink.h"
-#include "AndroidMediaResourceServer.h"
-#include "nsServiceManagerUtils.h"
-
-#include "MPAPI.h"
-
-#include "nsIPropertyBag2.h"
-
-#if defined(ANDROID)
-#include "android/log.h"
-#define ALOG(args...) __android_log_print(ANDROID_LOG_INFO, "AndroidMediaPluginHost" , ## args)
-#else
-#define ALOG(args...) /* do nothing */
-#endif
-
-using namespace MPAPI;
-
-Decoder::Decoder() :
- mResource(nullptr), mPrivate(nullptr)
-{
-}
-
-namespace mozilla {
-
-static char* GetResource(Decoder *aDecoder)
-{
- return static_cast<char*>(aDecoder->mResource);
-}
-
-class GetIntPrefEvent : public Runnable {
-public:
- GetIntPrefEvent(const char* aPref, int32_t* aResult)
- : mPref(aPref), mResult(aResult) {}
- NS_IMETHOD Run() override {
- return Preferences::GetInt(mPref, mResult);
- }
-private:
- const char* mPref;
- int32_t* mResult;
-};
-
-static bool GetIntPref(const char* aPref, int32_t* aResult)
-{
- // GetIntPref() is called on the decoder thread, but the Preferences API
- // can only be called on the main thread. Post a runnable and wait.
- NS_ENSURE_TRUE(aPref, false);
- NS_ENSURE_TRUE(aResult, false);
- nsCOMPtr<nsIRunnable> event = new GetIntPrefEvent(aPref, aResult);
- return NS_SUCCEEDED(NS_DispatchToMainThread(event, NS_DISPATCH_SYNC));
-}
-
-static bool
-GetSystemInfoString(const char *aKey, char *aResult, size_t aResultLength)
-{
- NS_ENSURE_TRUE(aKey, false);
- NS_ENSURE_TRUE(aResult, false);
-
- nsCOMPtr<nsIPropertyBag2> infoService = do_GetService("@mozilla.org/system-info;1");
- NS_ASSERTION(infoService, "Could not find a system info service");
-
- nsAutoCString key(aKey);
- nsAutoCString info;
- nsresult rv = infoService->GetPropertyAsACString(NS_ConvertUTF8toUTF16(key),
- info);
-
- NS_ENSURE_SUCCESS(rv, false);
-
- strncpy(aResult, info.get(), aResultLength);
-
- return true;
-}
-
-static PluginHost sPluginHost = {
- nullptr,
- nullptr,
- nullptr,
- nullptr,
- GetIntPref,
- GetSystemInfoString
-};
-
-// Return true if Omx decoding is supported on the device. This checks the
-// built in whitelist/blacklist and preferences to see if that is overridden.
-static bool IsOmxSupported()
-{
- bool forceEnabled =
- Preferences::GetBool("stagefright.force-enabled", false);
- bool disabled =
- Preferences::GetBool("stagefright.disabled", false);
-
- if (disabled) {
- NS_WARNING("XXX stagefright disabled\n");
- return false;
- }
-
- if (!forceEnabled) {
- nsCOMPtr<nsIGfxInfo> gfxInfo = services::GetGfxInfo();
- if (gfxInfo) {
- int32_t status;
- nsCString discardFailure;
- if (NS_SUCCEEDED(gfxInfo->GetFeatureStatus(nsIGfxInfo::FEATURE_STAGEFRIGHT, discardFailure, &status))) {
- if (status != nsIGfxInfo::FEATURE_STATUS_OK) {
- NS_WARNING("XXX stagefright blacklisted\n");
- return false;
- }
- }
- }
- }
-
- return true;
-}
-
-// Return the name of the shared library that implements Omx based decoding. This varies
-// depending on libstagefright version installed on the device and whether it is B2G vs Android.
-// nullptr is returned if Omx decoding is not supported on the device.
-static const char* GetOmxLibraryName()
-{
-#if defined(ANDROID)
- nsCOMPtr<nsIPropertyBag2> infoService = do_GetService("@mozilla.org/system-info;1");
- NS_ASSERTION(infoService, "Could not find a system info service");
-
- int32_t version;
- nsresult rv = infoService->GetPropertyAsInt32(NS_LITERAL_STRING("version"), &version);
- if (NS_SUCCEEDED(rv)) {
- ALOG("Android Version is: %d", version);
- }
-
- nsAutoString release_version;
- rv = infoService->GetPropertyAsAString(NS_LITERAL_STRING("release_version"), release_version);
- if (NS_SUCCEEDED(rv)) {
- ALOG("Android Release Version is: %s", NS_LossyConvertUTF16toASCII(release_version).get());
- }
-
- nsAutoString device;
- rv = infoService->GetPropertyAsAString(NS_LITERAL_STRING("device"), device);
- if (NS_SUCCEEDED(rv)) {
- ALOG("Android Device is: %s", NS_LossyConvertUTF16toASCII(device).get());
- }
-
- nsAutoString manufacturer;
- rv = infoService->GetPropertyAsAString(NS_LITERAL_STRING("manufacturer"), manufacturer);
- if (NS_SUCCEEDED(rv)) {
- ALOG("Android Manufacturer is: %s", NS_LossyConvertUTF16toASCII(manufacturer).get());
- }
-
- nsAutoString hardware;
- rv = infoService->GetPropertyAsAString(NS_LITERAL_STRING("hardware"), hardware);
- if (NS_SUCCEEDED(rv)) {
- ALOG("Android Hardware is: %s", NS_LossyConvertUTF16toASCII(hardware).get());
- }
-#endif
-
- if (!IsOmxSupported())
- return nullptr;
-
-#if defined(ANDROID)
- if (version >= 17) {
- return "libomxpluginkk.so";
- }
-
- // Ice Cream Sandwich and Jellybean
- return "libomxplugin.so";
-
-#else
- return nullptr;
-#endif
-}
-
-AndroidMediaPluginHost::AndroidMediaPluginHost() {
- MOZ_COUNT_CTOR(AndroidMediaPluginHost);
- MOZ_ASSERT(NS_IsMainThread());
-
- mResourceServer = AndroidMediaResourceServer::Start();
-
- const char* name = GetOmxLibraryName();
- ALOG("Loading OMX Plugin: %s", name ? name : "nullptr");
- if (name) {
- char *path = PR_GetLibraryFilePathname("libxul.so", (PRFuncPtr) GetOmxLibraryName);
- PRLibrary *lib = nullptr;
- if (path) {
- nsAutoCString libpath(path);
- PR_Free(path);
- int32_t slash = libpath.RFindChar('/');
- if (slash != kNotFound) {
- libpath.Truncate(slash + 1);
- libpath.Append(name);
- lib = PR_LoadLibrary(libpath.get());
- }
- }
- if (!lib)
- lib = PR_LoadLibrary(name);
-
- if (lib) {
- Manifest *manifest = static_cast<Manifest *>(PR_FindSymbol(lib, "MPAPI_MANIFEST"));
- if (manifest) {
- mPlugins.AppendElement(manifest);
- ALOG("OMX plugin successfully loaded");
- }
- }
- }
-}
-
-AndroidMediaPluginHost::~AndroidMediaPluginHost() {
- mResourceServer->Stop();
- MOZ_COUNT_DTOR(AndroidMediaPluginHost);
-}
-
-bool AndroidMediaPluginHost::FindDecoder(const nsACString& aMimeType, const char* const** aCodecs)
-{
- const char *chars;
- size_t len = NS_CStringGetData(aMimeType, &chars, nullptr);
- for (size_t n = 0; n < mPlugins.Length(); ++n) {
- Manifest *plugin = mPlugins[n];
- const char* const *codecs;
- if (plugin->CanDecode(chars, len, &codecs)) {
- if (aCodecs)
- *aCodecs = codecs;
- return true;
- }
- }
- return false;
-}
-
-MPAPI::Decoder *AndroidMediaPluginHost::CreateDecoder(MediaResource *aResource, const nsACString& aMimeType)
-{
- NS_ENSURE_TRUE(aResource, nullptr);
-
- nsAutoPtr<Decoder> decoder(new Decoder());
- if (!decoder) {
- return nullptr;
- }
-
- const char *chars;
- size_t len = NS_CStringGetData(aMimeType, &chars, nullptr);
- for (size_t n = 0; n < mPlugins.Length(); ++n) {
- Manifest *plugin = mPlugins[n];
- const char* const *codecs;
- if (!plugin->CanDecode(chars, len, &codecs)) {
- continue;
- }
-
- nsCString url;
- nsresult rv = mResourceServer->AddResource(aResource, url);
- if (NS_FAILED (rv)) continue;
-
- decoder->mResource = strdup(url.get());
- if (plugin->CreateDecoder(&sPluginHost, decoder, chars, len)) {
- return decoder.forget();
- }
- }
-
- return nullptr;
-}
-
-void AndroidMediaPluginHost::DestroyDecoder(Decoder *aDecoder)
-{
- aDecoder->DestroyDecoder(aDecoder);
- char* resource = GetResource(aDecoder);
- if (resource) {
- // resource *shouldn't* be null, but check anyway just in case the plugin
- // decoder does something stupid.
- mResourceServer->RemoveResource(nsCString(resource));
- free(resource);
- }
- delete aDecoder;
-}
-
-AndroidMediaPluginHost *sAndroidMediaPluginHost = nullptr;
-AndroidMediaPluginHost *EnsureAndroidMediaPluginHost()
-{
- MOZ_DIAGNOSTIC_ASSERT(NS_IsMainThread());
- if (!sAndroidMediaPluginHost) {
- sAndroidMediaPluginHost = new AndroidMediaPluginHost();
- }
- return sAndroidMediaPluginHost;
-}
-
-AndroidMediaPluginHost *GetAndroidMediaPluginHost()
-{
- MOZ_ASSERT(sAndroidMediaPluginHost);
- return sAndroidMediaPluginHost;
-}
-
-void AndroidMediaPluginHost::Shutdown()
-{
- delete sAndroidMediaPluginHost;
- sAndroidMediaPluginHost = nullptr;
-}
-
-} // namespace mozilla
diff --git a/dom/media/android/AndroidMediaPluginHost.h b/dom/media/android/AndroidMediaPluginHost.h
deleted file mode 100644
index 854b7f21e..000000000
--- a/dom/media/android/AndroidMediaPluginHost.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this file,
- * You can obtain one at http://mozilla.org/MPL/2.0/. */
-#if !defined(AndroidMediaPluginHost_h_)
-#define AndroidMediaPluginHost_h_
-
-#include "nsTArray.h"
-#include "MediaResource.h"
-#include "MPAPI.h"
-#include "AndroidMediaResourceServer.h"
-
-namespace mozilla {
-
-class AndroidMediaPluginHost {
- RefPtr<AndroidMediaResourceServer> mResourceServer;
- nsTArray<MPAPI::Manifest *> mPlugins;
-
- MPAPI::Manifest *FindPlugin(const nsACString& aMimeType);
-public:
- AndroidMediaPluginHost();
- ~AndroidMediaPluginHost();
-
- static void Shutdown();
-
- bool FindDecoder(const nsACString& aMimeType, const char* const** aCodecs);
- MPAPI::Decoder *CreateDecoder(mozilla::MediaResource *aResource, const nsACString& aMimeType);
- void DestroyDecoder(MPAPI::Decoder *aDecoder);
-};
-
-// Must be called on the main thread. Creates the plugin host if it doesn't
-// already exist.
-AndroidMediaPluginHost *EnsureAndroidMediaPluginHost();
-
-// May be called on any thread after EnsureAndroidMediaPluginHost has been called.
-AndroidMediaPluginHost *GetAndroidMediaPluginHost();
-
-} // namespace mozilla
-
-#endif
diff --git a/dom/media/android/AndroidMediaReader.cpp b/dom/media/android/AndroidMediaReader.cpp
deleted file mode 100644
index 12afacbc9..000000000
--- a/dom/media/android/AndroidMediaReader.cpp
+++ /dev/null
@@ -1,449 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this file,
- * You can obtain one at http://mozilla.org/MPL/2.0/. */
-#include "AndroidMediaReader.h"
-#include "mozilla/TimeStamp.h"
-#include "mozilla/gfx/Point.h"
-#include "MediaResource.h"
-#include "VideoUtils.h"
-#include "AndroidMediaDecoder.h"
-#include "AndroidMediaPluginHost.h"
-#include "MediaDecoderStateMachine.h"
-#include "ImageContainer.h"
-#include "AbstractMediaDecoder.h"
-#include "gfx2DGlue.h"
-#include "VideoFrameContainer.h"
-#include "mozilla/CheckedInt.h"
-
-namespace mozilla {
-
-using namespace mozilla::gfx;
-using namespace mozilla::media;
-
-typedef mozilla::layers::Image Image;
-typedef mozilla::layers::PlanarYCbCrImage PlanarYCbCrImage;
-
-AndroidMediaReader::AndroidMediaReader(AbstractMediaDecoder *aDecoder,
- const nsACString& aContentType) :
- MediaDecoderReader(aDecoder),
- mType(aContentType),
- mPlugin(nullptr),
- mHasAudio(false),
- mHasVideo(false),
- mVideoSeekTimeUs(-1),
- mAudioSeekTimeUs(-1)
-{
-}
-
-nsresult AndroidMediaReader::ReadMetadata(MediaInfo* aInfo,
- MetadataTags** aTags)
-{
- MOZ_ASSERT(OnTaskQueue());
-
- if (!mPlugin) {
- mPlugin = GetAndroidMediaPluginHost()->CreateDecoder(mDecoder->GetResource(), mType);
- if (!mPlugin) {
- return NS_ERROR_FAILURE;
- }
- }
-
- // Set the total duration (the max of the audio and video track).
- int64_t durationUs;
- mPlugin->GetDuration(mPlugin, &durationUs);
- if (durationUs) {
- mInfo.mMetadataDuration.emplace(TimeUnit::FromMicroseconds(durationUs));
- }
-
- if (mPlugin->HasVideo(mPlugin)) {
- int32_t width, height;
- mPlugin->GetVideoParameters(mPlugin, &width, &height);
- nsIntRect pictureRect(0, 0, width, height);
-
- // Validate the container-reported frame and pictureRect sizes. This ensures
- // that our video frame creation code doesn't overflow.
- nsIntSize displaySize(width, height);
- nsIntSize frameSize(width, height);
- if (!IsValidVideoRegion(frameSize, pictureRect, displaySize)) {
- return NS_ERROR_FAILURE;
- }
-
- // Video track's frame sizes will not overflow. Activate the video track.
- mHasVideo = true;
- mInfo.mVideo.mDisplay = displaySize;
- mPicture = pictureRect;
- mInitialFrame = frameSize;
- VideoFrameContainer* container = mDecoder->GetVideoFrameContainer();
- if (container) {
- container->ClearCurrentFrame(IntSize(displaySize.width, displaySize.height));
- }
- }
-
- if (mPlugin->HasAudio(mPlugin)) {
- int32_t numChannels, sampleRate;
- mPlugin->GetAudioParameters(mPlugin, &numChannels, &sampleRate);
- mHasAudio = true;
- mInfo.mAudio.mChannels = numChannels;
- mInfo.mAudio.mRate = sampleRate;
- }
-
- *aInfo = mInfo;
- *aTags = nullptr;
- return NS_OK;
-}
-
-RefPtr<ShutdownPromise>
-AndroidMediaReader::Shutdown()
-{
- ResetDecode();
- if (mPlugin) {
- GetAndroidMediaPluginHost()->DestroyDecoder(mPlugin);
- mPlugin = nullptr;
- }
-
- return MediaDecoderReader::Shutdown();
-}
-
-// Resets all state related to decoding, emptying all buffers etc.
-nsresult AndroidMediaReader::ResetDecode(TrackSet aTracks)
-{
- if (mLastVideoFrame) {
- mLastVideoFrame = nullptr;
- }
- mSeekRequest.DisconnectIfExists();
- mSeekPromise.RejectIfExists(NS_OK, __func__);
- return MediaDecoderReader::ResetDecode(aTracks);
-}
-
-bool AndroidMediaReader::DecodeVideoFrame(bool &aKeyframeSkip,
- int64_t aTimeThreshold)
-{
- // Record number of frames decoded and parsed. Automatically update the
- // stats counters using the AutoNotifyDecoded stack-based class.
- AbstractMediaDecoder::AutoNotifyDecoded a(mDecoder);
-
- // Throw away the currently buffered frame if we are seeking.
- if (mLastVideoFrame && mVideoSeekTimeUs != -1) {
- mLastVideoFrame = nullptr;
- }
-
- ImageBufferCallback bufferCallback(mDecoder->GetImageContainer());
- RefPtr<Image> currentImage;
-
- // Read next frame
- while (true) {
- MPAPI::VideoFrame frame;
- if (!mPlugin->ReadVideo(mPlugin, &frame, mVideoSeekTimeUs, &bufferCallback)) {
- // We reached the end of the video stream. If we have a buffered
- // video frame, push it into the video queue using the total duration
- // of the video as the end time.
- if (mLastVideoFrame) {
- int64_t durationUs;
- mPlugin->GetDuration(mPlugin, &durationUs);
- durationUs = std::max<int64_t>(durationUs - mLastVideoFrame->mTime, 0);
- RefPtr<VideoData> data = VideoData::ShallowCopyUpdateDuration(mLastVideoFrame,
- durationUs);
- mVideoQueue.Push(data);
- mLastVideoFrame = nullptr;
- }
- return false;
- }
- mVideoSeekTimeUs = -1;
-
- if (aKeyframeSkip) {
- // Disable keyframe skipping for now as
- // stagefright doesn't seem to be telling us
- // when a frame is a keyframe.
-#if 0
- if (!frame.mKeyFrame) {
- ++a.mStats.mParsedFrames;
- ++a.mStats.mDroppedFrames;
- continue;
- }
-#endif
- aKeyframeSkip = false;
- }
-
- if (frame.mSize == 0)
- return true;
-
- currentImage = bufferCallback.GetImage();
- int64_t pos = mDecoder->GetResource()->Tell();
- IntRect picture = mPicture;
-
- RefPtr<VideoData> v;
- if (currentImage) {
- gfx::IntSize frameSize = currentImage->GetSize();
- if (frameSize.width != mInitialFrame.width ||
- frameSize.height != mInitialFrame.height) {
- // Frame size is different from what the container reports. This is legal,
- // and we will preserve the ratio of the crop rectangle as it
- // was reported relative to the picture size reported by the container.
- picture.x = (mPicture.x * frameSize.width) / mInitialFrame.width;
- picture.y = (mPicture.y * frameSize.height) / mInitialFrame.height;
- picture.width = (frameSize.width * mPicture.width) / mInitialFrame.width;
- picture.height = (frameSize.height * mPicture.height) / mInitialFrame.height;
- }
-
- v = VideoData::CreateFromImage(mInfo.mVideo,
- pos,
- frame.mTimeUs,
- 1, // We don't know the duration yet.
- currentImage,
- frame.mKeyFrame,
- -1,
- picture);
- } else {
- // Assume YUV
- VideoData::YCbCrBuffer b;
- b.mPlanes[0].mData = static_cast<uint8_t *>(frame.Y.mData);
- b.mPlanes[0].mStride = frame.Y.mStride;
- b.mPlanes[0].mHeight = frame.Y.mHeight;
- b.mPlanes[0].mWidth = frame.Y.mWidth;
- b.mPlanes[0].mOffset = frame.Y.mOffset;
- b.mPlanes[0].mSkip = frame.Y.mSkip;
-
- b.mPlanes[1].mData = static_cast<uint8_t *>(frame.Cb.mData);
- b.mPlanes[1].mStride = frame.Cb.mStride;
- b.mPlanes[1].mHeight = frame.Cb.mHeight;
- b.mPlanes[1].mWidth = frame.Cb.mWidth;
- b.mPlanes[1].mOffset = frame.Cb.mOffset;
- b.mPlanes[1].mSkip = frame.Cb.mSkip;
-
- b.mPlanes[2].mData = static_cast<uint8_t *>(frame.Cr.mData);
- b.mPlanes[2].mStride = frame.Cr.mStride;
- b.mPlanes[2].mHeight = frame.Cr.mHeight;
- b.mPlanes[2].mWidth = frame.Cr.mWidth;
- b.mPlanes[2].mOffset = frame.Cr.mOffset;
- b.mPlanes[2].mSkip = frame.Cr.mSkip;
-
- if (frame.Y.mWidth != mInitialFrame.width ||
- frame.Y.mHeight != mInitialFrame.height) {
-
- // Frame size is different from what the container reports. This is legal,
- // and we will preserve the ratio of the crop rectangle as it
- // was reported relative to the picture size reported by the container.
- picture.x = (mPicture.x * frame.Y.mWidth) / mInitialFrame.width;
- picture.y = (mPicture.y * frame.Y.mHeight) / mInitialFrame.height;
- picture.width = (frame.Y.mWidth * mPicture.width) / mInitialFrame.width;
- picture.height = (frame.Y.mHeight * mPicture.height) / mInitialFrame.height;
- }
-
- // This is the approximate byte position in the stream.
- v = VideoData::CreateAndCopyData(mInfo.mVideo,
- mDecoder->GetImageContainer(),
- pos,
- frame.mTimeUs,
- 1, // We don't know the duration yet.
- b,
- frame.mKeyFrame,
- -1,
- picture);
- }
-
- if (!v) {
- return false;
- }
- a.mStats.mParsedFrames++;
- a.mStats.mDecodedFrames++;
- NS_ASSERTION(a.mStats.mDecodedFrames <= a.mStats.mParsedFrames, "Expect to decode fewer frames than parsed in AndroidMedia...");
-
- // Since MPAPI doesn't give us the end time of frames, we keep one frame
- // buffered in AndroidMediaReader and push it into the queue as soon as
- // we read the following frame so we can use that frame's start time as
- // the end time of the buffered frame.
- if (!mLastVideoFrame) {
- mLastVideoFrame = v;
- continue;
- }
-
- // Calculate the duration as the timestamp of the current frame minus the
- // timestamp of the previous frame. We can then return the previously
- // decoded frame, and it will have a valid timestamp.
- int64_t duration = v->mTime - mLastVideoFrame->mTime;
- mLastVideoFrame = VideoData::ShallowCopyUpdateDuration(mLastVideoFrame, duration);
-
- // We have the start time of the next frame, so we can push the previous
- // frame into the queue, except if the end time is below the threshold,
- // in which case it wouldn't be displayed anyway.
- if (mLastVideoFrame->GetEndTime() < aTimeThreshold) {
- mLastVideoFrame = nullptr;
- continue;
- }
-
- // Buffer the current frame we just decoded.
- mVideoQueue.Push(mLastVideoFrame);
- mLastVideoFrame = v;
-
- break;
- }
-
- return true;
-}
-
-bool AndroidMediaReader::DecodeAudioData()
-{
- MOZ_ASSERT(OnTaskQueue());
-
- // This is the approximate byte position in the stream.
- int64_t pos = mDecoder->GetResource()->Tell();
-
- // Read next frame
- MPAPI::AudioFrame source;
- if (!mPlugin->ReadAudio(mPlugin, &source, mAudioSeekTimeUs)) {
- return false;
- }
- mAudioSeekTimeUs = -1;
-
- // Ignore empty buffers which stagefright media read will sporadically return
- if (source.mSize == 0)
- return true;
-
- uint32_t frames = source.mSize / (source.mAudioChannels *
- sizeof(AudioDataValue));
-
- typedef AudioCompactor::NativeCopy MPCopy;
- return mAudioCompactor.Push(pos,
- source.mTimeUs,
- source.mAudioSampleRate,
- frames,
- source.mAudioChannels,
- MPCopy(static_cast<uint8_t *>(source.mData),
- source.mSize,
- source.mAudioChannels));
-}
-
-RefPtr<MediaDecoderReader::SeekPromise>
-AndroidMediaReader::Seek(SeekTarget aTarget, int64_t aEndTime)
-{
- MOZ_ASSERT(OnTaskQueue());
-
- RefPtr<SeekPromise> p = mSeekPromise.Ensure(__func__);
- if (mHasAudio && mHasVideo) {
- // The decoder seeks/demuxes audio and video streams separately. So if
- // we seek both audio and video to aTarget, the audio stream can typically
- // seek closer to the seek target, since typically every audio block is
- // a sync point, whereas for video there are only keyframes once every few
- // seconds. So if we have both audio and video, we must seek the video
- // stream to the preceding keyframe first, get the stream time, and then
- // seek the audio stream to match the video stream's time. Otherwise, the
- // audio and video streams won't be in sync after the seek.
- mVideoSeekTimeUs = aTarget.GetTime().ToMicroseconds();
-
- RefPtr<AndroidMediaReader> self = this;
- mSeekRequest.Begin(DecodeToFirstVideoData()->Then(OwnerThread(), __func__, [self] (MediaData* v) {
- self->mSeekRequest.Complete();
- self->mAudioSeekTimeUs = v->mTime;
- self->mSeekPromise.Resolve(media::TimeUnit::FromMicroseconds(self->mAudioSeekTimeUs), __func__);
- }, [self, aTarget] () {
- self->mSeekRequest.Complete();
- self->mAudioSeekTimeUs = aTarget.GetTime().ToMicroseconds();
- self->mSeekPromise.Resolve(aTarget.GetTime(), __func__);
- }));
- } else {
- mAudioSeekTimeUs = mVideoSeekTimeUs = aTarget.GetTime().ToMicroseconds();
- mSeekPromise.Resolve(aTarget.GetTime(), __func__);
- }
-
- return p;
-}
-
-AndroidMediaReader::ImageBufferCallback::ImageBufferCallback(mozilla::layers::ImageContainer *aImageContainer) :
- mImageContainer(aImageContainer)
-{
-}
-
-void *
-AndroidMediaReader::ImageBufferCallback::operator()(size_t aWidth, size_t aHeight,
- MPAPI::ColorFormat aColorFormat)
-{
- if (!mImageContainer) {
- NS_WARNING("No image container to construct an image");
- return nullptr;
- }
-
- RefPtr<Image> image;
- switch(aColorFormat) {
- case MPAPI::RGB565:
- image = mozilla::layers::CreateSharedRGBImage(mImageContainer,
- nsIntSize(aWidth, aHeight),
- SurfaceFormat::R5G6B5_UINT16);
- if (!image) {
- NS_WARNING("Could not create rgb image");
- return nullptr;
- }
-
- mImage = image;
- return image->GetBuffer();
- case MPAPI::I420:
- return CreateI420Image(aWidth, aHeight);
- default:
- NS_NOTREACHED("Color format not supported");
- return nullptr;
- }
-}
-
-uint8_t *
-AndroidMediaReader::ImageBufferCallback::CreateI420Image(size_t aWidth,
- size_t aHeight)
-{
- RefPtr<PlanarYCbCrImage> yuvImage = mImageContainer->CreatePlanarYCbCrImage();
- mImage = yuvImage;
-
- if (!yuvImage) {
- NS_WARNING("Could not create I420 image");
- return nullptr;
- }
-
- // Use uint32_t throughout to match AllocateAndGetNewBuffer's param
- const auto checkedFrameSize =
- CheckedInt<uint32_t>(aWidth) * aHeight;
-
- // Allocate enough for one full resolution Y plane
- // and two quarter resolution Cb/Cr planes.
- const auto checkedBufferSize =
- checkedFrameSize + checkedFrameSize / 2;
-
- if (!checkedBufferSize.isValid()) { // checks checkedFrameSize too
- NS_WARNING("Could not create I420 image");
- return nullptr;
- }
-
- const auto frameSize = checkedFrameSize.value();
-
- uint8_t *buffer =
- yuvImage->AllocateAndGetNewBuffer(checkedBufferSize.value());
-
- mozilla::layers::PlanarYCbCrData frameDesc;
-
- frameDesc.mYChannel = buffer;
- frameDesc.mCbChannel = buffer + frameSize;
- frameDesc.mCrChannel = frameDesc.mCbChannel + frameSize / 4;
-
- frameDesc.mYSize = IntSize(aWidth, aHeight);
- frameDesc.mCbCrSize = IntSize(aWidth / 2, aHeight / 2);
-
- frameDesc.mYStride = aWidth;
- frameDesc.mCbCrStride = aWidth / 2;
-
- frameDesc.mYSkip = 0;
- frameDesc.mCbSkip = 0;
- frameDesc.mCrSkip = 0;
-
- frameDesc.mPicX = 0;
- frameDesc.mPicY = 0;
- frameDesc.mPicSize = IntSize(aWidth, aHeight);
-
- yuvImage->AdoptData(frameDesc);
-
- return buffer;
-}
-
-already_AddRefed<Image>
-AndroidMediaReader::ImageBufferCallback::GetImage()
-{
- return mImage.forget();
-}
-
-} // namespace mozilla
diff --git a/dom/media/android/AndroidMediaReader.h b/dom/media/android/AndroidMediaReader.h
deleted file mode 100644
index def85a343..000000000
--- a/dom/media/android/AndroidMediaReader.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this file,
- * You can obtain one at http://mozilla.org/MPL/2.0/. */
-#if !defined(AndroidMediaReader_h_)
-#define AndroidMediaReader_h_
-
-#include "mozilla/Attributes.h"
-#include "MediaResource.h"
-#include "MediaDecoderReader.h"
-#include "ImageContainer.h"
-#include "mozilla/layers/SharedRGBImage.h"
-
-#include "MPAPI.h"
-
-class nsACString;
-
-namespace mozilla {
-
-class AbstractMediaDecoder;
-
-namespace layers {
-class ImageContainer;
-}
-
-class AndroidMediaReader : public MediaDecoderReader
-{
- nsCString mType;
- MPAPI::Decoder *mPlugin;
- bool mHasAudio;
- bool mHasVideo;
- nsIntRect mPicture;
- nsIntSize mInitialFrame;
- int64_t mVideoSeekTimeUs;
- int64_t mAudioSeekTimeUs;
- RefPtr<VideoData> mLastVideoFrame;
- MozPromiseHolder<MediaDecoderReader::SeekPromise> mSeekPromise;
- MozPromiseRequestHolder<MediaDecoderReader::MediaDataPromise> mSeekRequest;
-public:
- AndroidMediaReader(AbstractMediaDecoder* aDecoder,
- const nsACString& aContentType);
-
- nsresult ResetDecode(TrackSet aTracks = TrackSet(TrackInfo::kAudioTrack,
- TrackInfo::kVideoTrack)) override;
-
- bool DecodeAudioData() override;
- bool DecodeVideoFrame(bool &aKeyframeSkip, int64_t aTimeThreshold) override;
-
- nsresult ReadMetadata(MediaInfo* aInfo, MetadataTags** aTags) override;
- RefPtr<SeekPromise> Seek(SeekTarget aTarget, int64_t aEndTime) override;
-
- RefPtr<ShutdownPromise> Shutdown() override;
-
- class ImageBufferCallback : public MPAPI::BufferCallback {
- typedef mozilla::layers::Image Image;
-
- public:
- ImageBufferCallback(mozilla::layers::ImageContainer *aImageContainer);
- void *operator()(size_t aWidth, size_t aHeight,
- MPAPI::ColorFormat aColorFormat) override;
- already_AddRefed<Image> GetImage();
-
- private:
- uint8_t *CreateI420Image(size_t aWidth, size_t aHeight);
-
- mozilla::layers::ImageContainer *mImageContainer;
- RefPtr<Image> mImage;
- };
-
-};
-
-} // namespace mozilla
-
-#endif
diff --git a/dom/media/android/AndroidMediaResourceServer.cpp b/dom/media/android/AndroidMediaResourceServer.cpp
deleted file mode 100644
index bd76a8c68..000000000
--- a/dom/media/android/AndroidMediaResourceServer.cpp
+++ /dev/null
@@ -1,503 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this file,
- * You can obtain one at http://mozilla.org/MPL/2.0/. */
-#include "mozilla/Assertions.h"
-#include "mozilla/Base64.h"
-#include "mozilla/IntegerPrintfMacros.h"
-#include "mozilla/UniquePtr.h"
-#include "nsThreadUtils.h"
-#include "nsIServiceManager.h"
-#include "nsISocketTransport.h"
-#include "nsIOutputStream.h"
-#include "nsIInputStream.h"
-#include "nsIRandomGenerator.h"
-#include "nsReadLine.h"
-#include "nsNetCID.h"
-#include "VideoUtils.h"
-#include "MediaResource.h"
-#include "AndroidMediaResourceServer.h"
-
-#if defined(_MSC_VER)
-#define strtoll _strtoi64
-#endif
-
-using namespace mozilla;
-
-/*
- ReadCRLF is a variant of NS_ReadLine from nsReadLine.h that deals
- with the carriage return/line feed requirements of HTTP requests.
-*/
-template<typename CharT, class StreamType, class StringType>
-nsresult
-ReadCRLF (StreamType* aStream, nsLineBuffer<CharT> * aBuffer,
- StringType & aLine, bool *aMore)
-{
- // eollast is true if the last character in the buffer is a '\r',
- // signaling a potential '\r\n' sequence split between reads.
- bool eollast = false;
-
- aLine.Truncate();
-
- while (1) { // will be returning out of this loop on eol or eof
- if (aBuffer->start == aBuffer->end) { // buffer is empty. Read into it.
- uint32_t bytesRead;
- nsresult rv = aStream->Read(aBuffer->buf, kLineBufferSize, &bytesRead);
- if (NS_FAILED(rv) || bytesRead == 0) {
- *aMore = false;
- return rv;
- }
- aBuffer->start = aBuffer->buf;
- aBuffer->end = aBuffer->buf + bytesRead;
- *(aBuffer->end) = '\0';
- }
-
- /*
- * Walk the buffer looking for an end-of-line.
- * There are 4 cases to consider:
- * 1. the CR char is the last char in the buffer
- * 2. the CRLF sequence are the last characters in the buffer
- * 3. the CRLF sequence + one or more chars at the end of the buffer
- * we need at least one char after the first CRLF sequence to
- * set |aMore| correctly.
- * 4. The LF character is the first char in the buffer when eollast is
- * true.
- */
- CharT* current = aBuffer->start;
- if (eollast) { // Case 4
- if (*current == '\n') {
- aBuffer->start = ++current;
- *aMore = true;
- return NS_OK;
- }
- else {
- eollast = false;
- aLine.Append('\r');
- }
- }
- // Cases 2 and 3
- for ( ; current < aBuffer->end-1; ++current) {
- if (*current == '\r' && *(current+1) == '\n') {
- *current++ = '\0';
- *current++ = '\0';
- aLine.Append(aBuffer->start);
- aBuffer->start = current;
- *aMore = true;
- return NS_OK;
- }
- }
- // Case 1
- if (*current == '\r') {
- eollast = true;
- *current++ = '\0';
- }
-
- aLine.Append(aBuffer->start);
- aBuffer->start = aBuffer->end; // mark the buffer empty
- }
-}
-
-// Each client HTTP request results in a thread being spawned to process it.
-// That thread has a single event dispatched to it which handles the HTTP
-// protocol. It parses the headers and forwards data from the MediaResource
-// associated with the URL back to client. When the request is complete it will
-// shutdown the thread.
-class ServeResourceEvent : public Runnable {
-private:
- // Reading from this reads the data sent from the client.
- nsCOMPtr<nsIInputStream> mInput;
-
- // Writing to this sends data to the client.
- nsCOMPtr<nsIOutputStream> mOutput;
-
- // The AndroidMediaResourceServer that owns the MediaResource instances
- // served. This is used to lookup the MediaResource from the URL.
- RefPtr<AndroidMediaResourceServer> mServer;
-
- // Write 'aBufferLength' bytes from 'aBuffer' to 'mOutput'. This
- // method ensures all the data is written by checking the number
- // of bytes returned from the output stream's 'Write' method and
- // looping until done.
- nsresult WriteAll(char const* aBuffer, int32_t aBufferLength);
-
-public:
- ServeResourceEvent(nsIInputStream* aInput, nsIOutputStream* aOutput,
- AndroidMediaResourceServer* aServer)
- : mInput(aInput), mOutput(aOutput), mServer(aServer) {}
-
- // This method runs on the thread and exits when it has completed the
- // HTTP request.
- NS_IMETHOD Run();
-
- // Given the first line of an HTTP request, parse the URL requested and
- // return the MediaResource for that URL.
- already_AddRefed<MediaResource> GetMediaResource(nsCString const& aHTTPRequest);
-
- // Gracefully shutdown the thread and cleanup resources
- void Shutdown();
-};
-
-nsresult
-ServeResourceEvent::WriteAll(char const* aBuffer, int32_t aBufferLength)
-{
- while (aBufferLength > 0) {
- uint32_t written = 0;
- nsresult rv = mOutput->Write(aBuffer, aBufferLength, &written);
- if (NS_FAILED (rv)) return rv;
-
- aBufferLength -= written;
- aBuffer += written;
- }
-
- return NS_OK;
-}
-
-already_AddRefed<MediaResource>
-ServeResourceEvent::GetMediaResource(nsCString const& aHTTPRequest)
-{
- // Check that the HTTP method is GET
- const char* HTTP_METHOD = "GET ";
- if (strncmp(aHTTPRequest.get(), HTTP_METHOD, strlen(HTTP_METHOD)) != 0) {
- return nullptr;
- }
-
- const char* url_start = strchr(aHTTPRequest.get(), ' ');
- if (!url_start) {
- return nullptr;
- }
-
- const char* url_end = strrchr(++url_start, ' ');
- if (!url_end) {
- return nullptr;
- }
-
- // The path extracted from the HTTP request is used as a key in hash
- // table. It is not related to retrieving data from the filesystem so
- // we don't need to do any sanity checking on ".." paths and similar
- // exploits.
- nsCString relative(url_start, url_end - url_start);
- RefPtr<MediaResource> resource =
- mServer->GetResource(mServer->GetURLPrefix() + relative);
- return resource.forget();
-}
-
-NS_IMETHODIMP
-ServeResourceEvent::Run() {
- bool more = false; // Are there HTTP headers to read after the first line
- nsCString line; // Contains the current line read from input stream
- nsLineBuffer<char>* buffer = new nsLineBuffer<char>();
- nsresult rv = ReadCRLF(mInput.get(), buffer, line, &more);
- if (NS_FAILED(rv)) { Shutdown(); return rv; }
-
- // First line contains the HTTP GET request. Extract the URL and obtain
- // the MediaResource for it.
- RefPtr<MediaResource> resource = GetMediaResource(line);
- if (!resource) {
- const char* response_404 = "HTTP/1.1 404 Not Found\r\n"
- "Content-Length: 0\r\n\r\n";
- rv = WriteAll(response_404, strlen(response_404));
- Shutdown();
- return rv;
- }
-
- // Offset in bytes to start reading from resource.
- // This is zero by default but can be set to another starting value if
- // this HTTP request includes a byte range request header.
- int64_t start = 0;
-
- // Keep reading lines until we get a zero length line, which is the HTTP
- // protocol's way of signifying the end of headers and start of body, or
- // until we have no more data to read.
- while (more && line.Length() > 0) {
- rv = ReadCRLF(mInput.get(), buffer, line, &more);
- if (NS_FAILED(rv)) { Shutdown(); return rv; }
-
- // Look for a byte range request header. If there is one, set the
- // media resource offset to start from to that requested. Here we
- // only check for the range request format used by Android rather
- // than implementing all possibilities in the HTTP specification.
- // That is, the range request is of the form:
- // Range: bytes=nnnn-
- * Where 'nnnn' is an integer number.
- // The end of the range is not checked, instead we return up to
- // the end of the resource and the client is informed of this via
- // the content-range header.
- NS_NAMED_LITERAL_CSTRING(byteRange, "Range: bytes=");
- const char* s = strstr(line.get(), byteRange.get());
- if (s) {
- start = strtoll(s+byteRange.Length(), nullptr, 10);
-
- // Clamp 'start' to be between 0 and the resource length.
- start = std::max(int64_t(0), std::min(resource->GetLength(), start));
- }
- }
-
- // HTTP response to use if this is a non byte range request
- const char* response_normal = "HTTP/1.1 200 OK\r\n";
-
- // HTTP response to use if this is a byte range request
- const char* response_range = "HTTP/1.1 206 Partial Content\r\n";
-
- // End of HTTP response headers is indicated by an empty line.
- const char* response_end = "\r\n";
-
- // If the request was a byte range request, we need to read from the
- // requested offset. If the resource is non-seekable, or the seek
- // fails, then the start offset is set back to zero. This results in all
- // HTTP response data being as if the byte range request was not made.
- if (start > 0 && !resource->IsTransportSeekable()) {
- start = 0;
- }
-
- const char* response_line = start > 0 ?
- response_range :
- response_normal;
- rv = WriteAll(response_line, strlen(response_line));
- if (NS_FAILED(rv)) { Shutdown(); return NS_OK; }
-
- // Buffer used for reading from the input stream and writing to
- // the output stream. The buffer size should be big enough for the
- // HTTP response headers sent below. A static_assert ensures
- // this where the buffer is used.
- const int buffer_size = 32768;
- auto b = MakeUnique<char[]>(buffer_size);
-
- // If we know the length of the resource, send a Content-Length header.
- int64_t contentlength = resource->GetLength() - start;
- if (contentlength > 0) {
- static_assert (buffer_size > 1024,
- "buffer_size must be large enough "
- "to hold response headers");
- snprintf(b.get(), buffer_size, "Content-Length: %" PRId64 "\r\n", contentlength);
- rv = WriteAll(b.get(), strlen(b.get()));
- if (NS_FAILED(rv)) { Shutdown(); return NS_OK; }
- }
-
- // If the request was a byte range request, respond with a Content-Range
- // header which details the extent of the data returned.
- if (start > 0) {
- static_assert (buffer_size > 1024,
- "buffer_size must be large enough "
- "to hold response headers");
- snprintf(b.get(), buffer_size, "Content-Range: "
- "bytes %" PRId64 "-%" PRId64 "/%" PRId64 "\r\n",
- start, resource->GetLength() - 1, resource->GetLength());
- rv = WriteAll(b.get(), strlen(b.get()));
- if (NS_FAILED(rv)) { Shutdown(); return NS_OK; }
- }
-
- rv = WriteAll(response_end, strlen(response_end));
- if (NS_FAILED(rv)) { Shutdown(); return NS_OK; }
-
- rv = mOutput->Flush();
- if (NS_FAILED(rv)) { Shutdown(); return NS_OK; }
-
- // Read data from media resource
- uint32_t bytesRead = 0; // Number of bytes read/written to streams
- rv = resource->ReadAt(start, b.get(), buffer_size, &bytesRead);
- while (NS_SUCCEEDED(rv) && bytesRead != 0) {
- // Keep track of what we think the starting position for the next read
- // is. This is used in subsequent ReadAt calls to ensure we are reading
- // from the correct offset in the case where another thread is reading
- // from the same MediaResource.
- start += bytesRead;
-
- // Write data obtained from media resource to output stream
- rv = WriteAll(b.get(), bytesRead);
- if (NS_FAILED (rv)) break;
-
- rv = resource->ReadAt(start, b.get(), 32768, &bytesRead);
- }
-
- Shutdown();
- return NS_OK;
-}
-
-void
-ServeResourceEvent::Shutdown()
-{
- // Cleanup resources and exit.
- mInput->Close();
- mOutput->Close();
-
- // To shutdown the current thread we need to first exit this event.
- // The Shutdown event below is posted to the main thread to do this.
- nsCOMPtr<nsIRunnable> event = new ShutdownThreadEvent(NS_GetCurrentThread());
- NS_DispatchToMainThread(event);
-}
-
-/*
- This is the listener attached to the server socket. When an HTTP
- request is made by the client the OnSocketAccepted method is
- called. This method will spawn a thread to process the request.
- The thread receives a single event which does the parsing of
- the HTTP request and forwarding the data from the MediaResource
- to the output stream of the request.
-
- The MediaResource used for providing the request data is obtained
- from the AndroidMediaResourceServer that created this listener, using the
- URL the client requested.
-*/
-class ResourceSocketListener : public nsIServerSocketListener
-{
-public:
- // The AndroidMediaResourceServer used to look up the MediaResource
- // on requests.
- RefPtr<AndroidMediaResourceServer> mServer;
-
- NS_DECL_THREADSAFE_ISUPPORTS
- NS_DECL_NSISERVERSOCKETLISTENER
-
- ResourceSocketListener(AndroidMediaResourceServer* aServer) :
- mServer(aServer)
- {
- }
-
-private:
- virtual ~ResourceSocketListener() { }
-};
-
-NS_IMPL_ISUPPORTS(ResourceSocketListener, nsIServerSocketListener)
-
-NS_IMETHODIMP
-ResourceSocketListener::OnSocketAccepted(nsIServerSocket* aServ,
- nsISocketTransport* aTrans)
-{
- nsCOMPtr<nsIInputStream> input;
- nsCOMPtr<nsIOutputStream> output;
- nsresult rv;
-
- rv = aTrans->OpenInputStream(nsITransport::OPEN_BLOCKING, 0, 0, getter_AddRefs(input));
- if (NS_FAILED(rv)) return rv;
-
- rv = aTrans->OpenOutputStream(nsITransport::OPEN_BLOCKING, 0, 0, getter_AddRefs(output));
- if (NS_FAILED(rv)) return rv;
-
- nsCOMPtr<nsIThread> thread;
- rv = NS_NewThread(getter_AddRefs(thread));
- if (NS_FAILED(rv)) return rv;
-
- nsCOMPtr<nsIRunnable> event = new ServeResourceEvent(input.get(), output.get(), mServer);
- return thread->Dispatch(event, NS_DISPATCH_NORMAL);
-}
-
-NS_IMETHODIMP
-ResourceSocketListener::OnStopListening(nsIServerSocket* aServ, nsresult aStatus)
-{
- return NS_OK;
-}
-
-AndroidMediaResourceServer::AndroidMediaResourceServer() :
- mMutex("AndroidMediaResourceServer")
-{
-}
-
-NS_IMETHODIMP
-AndroidMediaResourceServer::Run()
-{
- MOZ_DIAGNOSTIC_ASSERT(NS_IsMainThread());
- MutexAutoLock lock(mMutex);
-
- nsresult rv;
- mSocket = do_CreateInstance(NS_SERVERSOCKET_CONTRACTID, &rv);
- if (NS_FAILED(rv)) return rv;
-
- rv = mSocket->InitSpecialConnection(-1,
- nsIServerSocket::LoopbackOnly
- | nsIServerSocket::KeepWhenOffline,
- -1);
- if (NS_FAILED(rv)) return rv;
-
- rv = mSocket->AsyncListen(new ResourceSocketListener(this));
- if (NS_FAILED(rv)) return rv;
-
- return NS_OK;
-}
-
-/* static */
-already_AddRefed<AndroidMediaResourceServer>
-AndroidMediaResourceServer::Start()
-{
- MOZ_ASSERT(NS_IsMainThread());
- RefPtr<AndroidMediaResourceServer> server = new AndroidMediaResourceServer();
- server->Run();
- return server.forget();
-}
-
-void
-AndroidMediaResourceServer::Stop()
-{
- MutexAutoLock lock(mMutex);
- mSocket->Close();
- mSocket = nullptr;
-}
-
-nsresult
-AndroidMediaResourceServer::AppendRandomPath(nsCString& aUrl)
-{
- // Use a cryptographic quality PRNG to generate raw random bytes
- // and convert that to a base64 string for use as an URL path. This
- // is based on code from nsExternalAppHandler::SetUpTempFile.
- nsresult rv;
- nsAutoCString salt;
- rv = GenerateRandomPathName(salt, 16);
- if (NS_FAILED(rv)) return rv;
- aUrl += "/";
- aUrl += salt;
- return NS_OK;
-}
-
-nsresult
-AndroidMediaResourceServer::AddResource(mozilla::MediaResource* aResource, nsCString& aUrl)
-{
- nsCString url = GetURLPrefix();
- nsresult rv = AppendRandomPath(url);
- if (NS_FAILED (rv)) return rv;
-
- {
- MutexAutoLock lock(mMutex);
-
- // Adding a resource URL that already exists is considered an error.
- if (mResources.find(url) != mResources.end()) return NS_ERROR_FAILURE;
- mResources[url] = aResource;
- }
-
- aUrl = url;
-
- return NS_OK;
-}
-
-void
-AndroidMediaResourceServer::RemoveResource(nsCString const& aUrl)
-{
- MutexAutoLock lock(mMutex);
- mResources.erase(aUrl);
-}
-
-nsCString
-AndroidMediaResourceServer::GetURLPrefix()
-{
- MutexAutoLock lock(mMutex);
-
- int32_t port = 0;
- nsresult rv = mSocket->GetPort(&port);
- if (NS_FAILED (rv) || port < 0) {
- return nsCString("");
- }
-
- char buffer[256];
- snprintf(buffer, sizeof(buffer), "http://127.0.0.1:%d", port >= 0 ? port : 0);
- return nsCString(buffer);
-}
-
-already_AddRefed<MediaResource>
-AndroidMediaResourceServer::GetResource(nsCString const& aUrl)
-{
- MutexAutoLock lock(mMutex);
- ResourceMap::const_iterator it = mResources.find(aUrl);
- if (it == mResources.end()) return nullptr;
-
- RefPtr<MediaResource> resource = it->second;
- return resource.forget();
-}
diff --git a/dom/media/android/AndroidMediaResourceServer.h b/dom/media/android/AndroidMediaResourceServer.h
deleted file mode 100644
index 68200f9c0..000000000
--- a/dom/media/android/AndroidMediaResourceServer.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this file,
- * You can obtain one at http://mozilla.org/MPL/2.0/. */
-#if !defined(AndroidMediaResourceServer_h_)
-#define AndroidMediaResourceServer_h_
-
-#include <map>
-#include "nsIServerSocket.h"
-#include "MediaResource.h"
-
-namespace mozilla {
-
-class MediaResource;
-
-/*
- AndroidMediaResourceServer instantiates a socket server that understands
- HTTP requests for MediaResource instances. The server runs on an
- automatically selected port and MediaResource instances are registered.
- The registration returns a string URL that can be used to fetch the
- resource. That URL contains a randomly generated path to make it
- difficult for other local applications on the device to guess it.
-
- The HTTP protocol is limited in that it supports only what the
- Android DataSource implementation uses to fetch media. It
- understands HTTP GET and byte range requests.
-
- The intent of this class is to be used in media backends that
- have a system component that does its own network requests. These
- requests are made against this server, which then services them
- using standard Gecko network requests and the media cache.
-
- The AndroidMediaResourceServer can be instantiated on any thread and
- its methods are threadsafe - they can be called on any thread.
- The server socket itself always runs on the main thread; the
- static Start() method ensures this by synchronously dispatching
- to the main thread.
-*/
-class AndroidMediaResourceServer : public Runnable
-{
-private:
- // Mutex protecting private members of AndroidMediaResourceServer.
- // All member variables below this point in the class definition
- // must acquire the mutex before access.
- mozilla::Mutex mMutex;
-
- // Server socket used to listen for incoming connections
- nsCOMPtr<nsIServerSocket> mSocket;
-
- // Mapping from MediaResource URLs to the MediaResource
- // object served at that URL.
- typedef std::map<nsCString,
- RefPtr<mozilla::MediaResource> > ResourceMap;
- ResourceMap mResources;
-
- // Create an AndroidMediaResourceServer that will listen on an automatically
- // selected port when started. This is private as it should only be
- // called internally from the public 'Start' method.
- AndroidMediaResourceServer();
- NS_IMETHOD Run();
-
- // Append a random URL path to a string. This is used for creating a
- // unique URL for a resource, which helps prevent malicious software
- // running on the same machine as the server from guessing the URL
- // and accessing video data.
- nsresult AppendRandomPath(nsCString& aURL);
-
-public:
- // Create an AndroidMediaResourceServer and start it listening. This call will
- // perform a synchronous request on the main thread.
- static already_AddRefed<AndroidMediaResourceServer> Start();
-
- // Stops the server from listening and accepting further connections.
- void Stop();
-
- // Add a MediaResource to be served by this server. Stores the
- // absolute URL that can be used to access the resource in 'aUrl'.
- nsresult AddResource(mozilla::MediaResource* aResource, nsCString& aUrl);
-
- // Remove a MediaResource so it is no longer served by this server.
- // The URL provided must match exactly that provided by a previous
- // call to "AddResource".
- void RemoveResource(nsCString const& aUrl);
-
- // Returns the prefix for HTTP requests to the server. Combining
- // this with the result of AddResource gives an absolute URL.
- nsCString GetURLPrefix();
-
- // Returns the resource associated with a given URL
- already_AddRefed<mozilla::MediaResource> GetResource(nsCString const& aUrl);
-};
-
-} // namespace mozilla
-
-#endif
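
For context on the pattern the removed class implements (registering resources under randomly generated URL paths and handing them back per request), here is a minimal standalone sketch. The names Resource, ResourceRegistry, and MakeRandomPath are hypothetical; std::mutex and std::map stand in for Gecko's Mutex and nsCString-keyed map, and std::random_device stands in for the cryptographic-quality PRNG the real code uses.

    #include <map>
    #include <memory>
    #include <mutex>
    #include <random>
    #include <string>

    struct Resource { std::string name; };  // stand-in for MediaResource

    class ResourceRegistry {
    public:
      // Register a resource under a freshly generated random path and return
      // the path a client can later use to fetch it.
      std::string Add(std::shared_ptr<Resource> aResource) {
        std::string path = "/" + MakeRandomPath();
        std::lock_guard<std::mutex> lock(mMutex);
        mResources[path] = std::move(aResource);
        return path;
      }

      void Remove(const std::string& aPath) {
        std::lock_guard<std::mutex> lock(mMutex);
        mResources.erase(aPath);
      }

      std::shared_ptr<Resource> Get(const std::string& aPath) const {
        std::lock_guard<std::mutex> lock(mMutex);
        auto it = mResources.find(aPath);
        return it == mResources.end() ? nullptr : it->second;
      }

    private:
      // 16 random bytes rendered as hex, hard for other local processes to
      // guess. The removed code uses a cryptographic PRNG and base64 instead.
      static std::string MakeRandomPath() {
        std::random_device rd;
        static const char* const kHex = "0123456789abcdef";
        std::string out;
        for (int i = 0; i < 16; ++i) {
          unsigned byte = rd() & 0xFF;
          out += kHex[byte >> 4];
          out += kHex[byte & 0xF];
        }
        return out;
      }

      mutable std::mutex mMutex;
      std::map<std::string, std::shared_ptr<Resource>> mResources;
    };
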
diff --git a/dom/media/android/MPAPI.h b/dom/media/android/MPAPI.h
deleted file mode 100644
index 9b289ca09..000000000
--- a/dom/media/android/MPAPI.h
+++ /dev/null
@@ -1,165 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this file,
- * You can obtain one at http://mozilla.org/MPL/2.0/. */
-#if !defined(MPAPI_h_)
-#define MPAPI_h_
-
-#include <stdint.h>
-
-namespace MPAPI {
-
-enum ColorFormat {
- I420,
- RGB565
-};
-
-/*
- * A callback for the plugin to use to request a buffer owned by Gecko. This can
- * save us a copy or two down the line.
- */
-class BufferCallback {
-public:
- virtual void *operator()(size_t aWidth, size_t aHeight,
- ColorFormat aColorFormat) = 0;
-};
-
-struct VideoPlane {
- VideoPlane() :
- mData(0),
- mStride(0),
- mWidth(0),
- mHeight(0),
- mOffset(0),
- mSkip(0)
- {}
-
- void *mData;
- int32_t mStride;
- int32_t mWidth;
- int32_t mHeight;
- int32_t mOffset;
- int32_t mSkip;
-};
-
-struct VideoFrame {
- int64_t mTimeUs;
- bool mKeyFrame;
- void *mData;
- size_t mSize;
- int32_t mStride;
- int32_t mSliceHeight;
- int32_t mRotation;
- VideoPlane Y;
- VideoPlane Cb;
- VideoPlane Cr;
-
- VideoFrame() :
- mTimeUs(0),
- mKeyFrame(false),
- mData(0),
- mSize(0),
- mStride(0),
- mSliceHeight(0),
- mRotation(0)
- {}
-
- void Set(int64_t aTimeUs, bool aKeyFrame,
- void *aData, size_t aSize, int32_t aStride, int32_t aSliceHeight, int32_t aRotation,
- void *aYData, int32_t aYStride, int32_t aYWidth, int32_t aYHeight, int32_t aYOffset, int32_t aYSkip,
- void *aCbData, int32_t aCbStride, int32_t aCbWidth, int32_t aCbHeight, int32_t aCbOffset, int32_t aCbSkip,
- void *aCrData, int32_t aCrStride, int32_t aCrWidth, int32_t aCrHeight, int32_t aCrOffset, int32_t aCrSkip)
- {
- mTimeUs = aTimeUs;
- mKeyFrame = aKeyFrame;
- mData = aData;
- mSize = aSize;
- mStride = aStride;
- mSliceHeight = aSliceHeight;
- mRotation = aRotation;
- Y.mData = aYData;
- Y.mStride = aYStride;
- Y.mWidth = aYWidth;
- Y.mHeight = aYHeight;
- Y.mOffset = aYOffset;
- Y.mSkip = aYSkip;
- Cb.mData = aCbData;
- Cb.mStride = aCbStride;
- Cb.mWidth = aCbWidth;
- Cb.mHeight = aCbHeight;
- Cb.mOffset = aCbOffset;
- Cb.mSkip = aCbSkip;
- Cr.mData = aCrData;
- Cr.mStride = aCrStride;
- Cr.mWidth = aCrWidth;
- Cr.mHeight = aCrHeight;
- Cr.mOffset = aCrOffset;
- Cr.mSkip = aCrSkip;
- }
-};
-
-struct AudioFrame {
- int64_t mTimeUs;
- void *mData; // 16-bit PCM, interleaved
- size_t mSize; // Size of mData in bytes
- int32_t mAudioChannels;
- int32_t mAudioSampleRate;
-
- AudioFrame() :
- mTimeUs(0),
- mData(0),
- mSize(0),
- mAudioChannels(0),
- mAudioSampleRate(0)
- {
- }
-
- void Set(int64_t aTimeUs,
- void *aData, size_t aSize,
- int32_t aAudioChannels, int32_t aAudioSampleRate)
- {
- mTimeUs = aTimeUs;
- mData = aData;
- mSize = aSize;
- mAudioChannels = aAudioChannels;
- mAudioSampleRate = aAudioSampleRate;
- }
-};
-
-struct Decoder;
-
-struct PluginHost {
- bool (*Read)(Decoder *aDecoder, char *aBuffer, int64_t aOffset, uint32_t aCount, uint32_t* aBytes);
- uint64_t (*GetLength)(Decoder *aDecoder);
- void (*SetMetaDataReadMode)(Decoder *aDecoder);
- void (*SetPlaybackReadMode)(Decoder *aDecoder);
- bool (*GetIntPref)(const char *aPref, int32_t *aResult);
- bool (*GetSystemInfoString)(const char *aKey, char *aResult, size_t aResultLen);
-};
-
-struct Decoder {
- void *mResource;
- void *mPrivate;
-
- Decoder();
-
- void (*GetDuration)(Decoder *aDecoder, int64_t *durationUs);
- void (*GetVideoParameters)(Decoder *aDecoder, int32_t *aWidth, int32_t *aHeight);
- void (*GetAudioParameters)(Decoder *aDecoder, int32_t *aNumChannels, int32_t *aSampleRate);
- bool (*HasVideo)(Decoder *aDecoder);
- bool (*HasAudio)(Decoder *aDecoder);
- bool (*ReadVideo)(Decoder *aDecoder, VideoFrame *aFrame, int64_t aSeekTimeUs, BufferCallback *aBufferCallback);
- bool (*ReadAudio)(Decoder *aDecoder, AudioFrame *aFrame, int64_t aSeekTimeUs);
- void (*DestroyDecoder)(Decoder *);
-};
-
-struct Manifest {
- bool (*CanDecode)(const char *aMimeChars, size_t aMimeLen, const char* const**aCodecs);
- bool (*CreateDecoder)(PluginHost *aPluginHost, Decoder *aDecoder,
- const char *aMimeChars, size_t aMimeLen);
-};
-
-}
-
-#endif
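
MPAPI.h, removed above, defines a C-style plugin ABI: plain structs of function pointers that the host fills in on one side (PluginHost) and the plugin fills in on the other (Decoder, Manifest). As a rough sketch of how such a boundary is typically wired up, here is a trimmed-down, self-contained version; the struct shapes and names below are simplified stand-ins rather than the actual MPAPI contract, and a real plugin would live in a shared library whose Manifest symbol the host looks up at load time.

    #include <cstdio>
    #include <cstring>

    // Host-provided services, passed across the ABI as function pointers.
    struct Host {
      bool (*Read)(void* aStream, char* aBuffer, long long aOffset,
                   unsigned aCount);
    };

    // Plugin-provided decoder "vtable", filled in by the plugin.
    struct Decoder {
      void* mPrivate;
      bool (*HasAudio)(Decoder* aDecoder);
      void (*Destroy)(Decoder* aDecoder);
    };

    struct Manifest {
      bool (*CanDecode)(const char* aMime);
      bool (*CreateDecoder)(Host* aHost, Decoder* aDecoder, const char* aMime);
    };

    // --- plugin side --------------------------------------------------------
    static bool PluginHasAudio(Decoder*) { return true; }
    static void PluginDestroy(Decoder* aDecoder) { aDecoder->mPrivate = nullptr; }

    static bool PluginCanDecode(const char* aMime) {
      return std::strcmp(aMime, "audio/mpeg") == 0;
    }

    static bool PluginCreateDecoder(Host*, Decoder* aDecoder, const char*) {
      aDecoder->HasAudio = PluginHasAudio;
      aDecoder->Destroy = PluginDestroy;
      return true;
    }

    // The single symbol a host would look up in the plugin library.
    static Manifest sManifest = { PluginCanDecode, PluginCreateDecoder };

    // --- host side ----------------------------------------------------------
    int main() {
      if (sManifest.CanDecode("audio/mpeg")) {
        Host host{ nullptr };
        Decoder decoder{};
        if (sManifest.CreateDecoder(&host, &decoder, "audio/mpeg")) {
          std::printf("has audio: %d\n", decoder.HasAudio(&decoder));
          decoder.Destroy(&decoder);
        }
      }
      return 0;
    }
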
diff --git a/dom/media/android/moz.build b/dom/media/android/moz.build
deleted file mode 100644
index 3ad43cd50..000000000
--- a/dom/media/android/moz.build
+++ /dev/null
@@ -1,27 +0,0 @@
-# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
-# vim: set filetype=python:
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this
-# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-EXPORTS += [
- 'AndroidMediaDecoder.h',
- 'AndroidMediaPluginHost.h',
- 'AndroidMediaReader.h',
- 'AndroidMediaResourceServer.h',
- 'MPAPI.h',
-]
-
-UNIFIED_SOURCES += [
- 'AndroidMediaDecoder.cpp',
- 'AndroidMediaPluginHost.cpp',
- 'AndroidMediaReader.cpp',
- 'AndroidMediaResourceServer.cpp',
-]
-
-LOCAL_INCLUDES += [
- '/dom/base',
- '/dom/html',
-]
-
-FINAL_LIBRARY = 'xul'
diff --git a/dom/media/directshow/AudioSinkFilter.cpp b/dom/media/directshow/AudioSinkFilter.cpp
deleted file mode 100644
index 9f23c0e00..000000000
--- a/dom/media/directshow/AudioSinkFilter.cpp
+++ /dev/null
@@ -1,285 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include "SampleSink.h"
-#include "AudioSinkFilter.h"
-#include "AudioSinkInputPin.h"
-#include "VideoUtils.h"
-#include "mozilla/Logging.h"
-
-
-#include <initguid.h>
-#include <wmsdkidl.h>
-
-#define DELETE_RESET(p) { delete (p); (p) = nullptr; }
-
-DEFINE_GUID(CLSID_MozAudioSinkFilter, 0x1872d8c8, 0xea8d, 0x4c34, 0xae, 0x96, 0x69, 0xde,
- 0xf1, 0x33, 0x7b, 0x33);
-
-using namespace mozilla::media;
-
-namespace mozilla {
-
-static LazyLogModule gDirectShowLog("DirectShowDecoder");
-#define LOG(...) MOZ_LOG(gDirectShowLog, mozilla::LogLevel::Debug, (__VA_ARGS__))
-
-AudioSinkFilter::AudioSinkFilter(const wchar_t* aObjectName, HRESULT* aOutResult)
- : BaseFilter(aObjectName, CLSID_MozAudioSinkFilter),
- mFilterCritSec("AudioSinkFilter::mFilterCritSec")
-{
- (*aOutResult) = S_OK;
- mInputPin = new AudioSinkInputPin(L"AudioSinkInputPin",
- this,
- &mFilterCritSec,
- aOutResult);
-}
-
-AudioSinkFilter::~AudioSinkFilter()
-{
-}
-
-int
-AudioSinkFilter::GetPinCount()
-{
- return 1;
-}
-
-BasePin*
-AudioSinkFilter::GetPin(int aIndex)
-{
- CriticalSectionAutoEnter lockFilter(mFilterCritSec);
- return (aIndex == 0) ? static_cast<BasePin*>(mInputPin) : nullptr;
-}
-
-HRESULT
-AudioSinkFilter::Pause()
-{
- CriticalSectionAutoEnter lockFilter(mFilterCritSec);
- if (mState == State_Stopped) {
- // Change the state, THEN activate the input pin.
- mState = State_Paused;
- if (mInputPin && mInputPin->IsConnected()) {
- mInputPin->Active();
- }
- } else if (mState == State_Running) {
- mState = State_Paused;
- }
- return S_OK;
-}
-
-HRESULT
-AudioSinkFilter::Stop()
-{
- CriticalSectionAutoEnter lockFilter(mFilterCritSec);
- mState = State_Stopped;
- if (mInputPin) {
- mInputPin->Inactive();
- }
-
- GetSampleSink()->Flush();
-
- return S_OK;
-}
-
-HRESULT
-AudioSinkFilter::Run(REFERENCE_TIME tStart)
-{
- LOG("AudioSinkFilter::Run(%lld) [%4.2lf]",
- RefTimeToUsecs(tStart),
- double(RefTimeToUsecs(tStart)) / USECS_PER_S);
- return media::BaseFilter::Run(tStart);
-}
-
-HRESULT
-AudioSinkFilter::GetClassID( OUT CLSID * pCLSID )
-{
- (* pCLSID) = CLSID_MozAudioSinkFilter;
- return S_OK;
-}
-
-HRESULT
-AudioSinkFilter::QueryInterface(REFIID aIId, void **aInterface)
-{
- if (aIId == IID_IMediaSeeking) {
- *aInterface = static_cast<IMediaSeeking*>(this);
- AddRef();
- return S_OK;
- }
- return mozilla::media::BaseFilter::QueryInterface(aIId, aInterface);
-}
-
-ULONG
-AudioSinkFilter::AddRef()
-{
- return ::InterlockedIncrement(&mRefCnt);
-}
-
-ULONG
-AudioSinkFilter::Release()
-{
- unsigned long newRefCnt = ::InterlockedDecrement(&mRefCnt);
- if (!newRefCnt) {
- delete this;
- }
- return newRefCnt;
-}
-
-SampleSink*
-AudioSinkFilter::GetSampleSink()
-{
- return mInputPin->GetSampleSink();
-}
-
-
-// IMediaSeeking implementation.
-//
-// Calls to IMediaSeeking are forwarded to the output pin that the
-// AudioSinkInputPin is connected to, i.e. upstream towards the parser and
-// source filters, which actually implement seeking.
-#define ENSURE_CONNECTED_PIN_SEEKING \
- if (!mInputPin) { \
- return E_NOTIMPL; \
- } \
- RefPtr<IMediaSeeking> pinSeeking = mInputPin->GetConnectedPinSeeking(); \
- if (!pinSeeking) { \
- return E_NOTIMPL; \
- }
-
-HRESULT
-AudioSinkFilter::GetCapabilities(DWORD* aCapabilities)
-{
- ENSURE_CONNECTED_PIN_SEEKING
- return pinSeeking->GetCapabilities(aCapabilities);
-}
-
-HRESULT
-AudioSinkFilter::CheckCapabilities(DWORD* aCapabilities)
-{
- ENSURE_CONNECTED_PIN_SEEKING
- return pinSeeking->CheckCapabilities(aCapabilities);
-}
-
-HRESULT
-AudioSinkFilter::IsFormatSupported(const GUID* aFormat)
-{
- ENSURE_CONNECTED_PIN_SEEKING
- return pinSeeking->IsFormatSupported(aFormat);
-}
-
-HRESULT
-AudioSinkFilter::QueryPreferredFormat(GUID* aFormat)
-{
- ENSURE_CONNECTED_PIN_SEEKING
- return pinSeeking->QueryPreferredFormat(aFormat);
-}
-
-HRESULT
-AudioSinkFilter::GetTimeFormat(GUID* aFormat)
-{
- ENSURE_CONNECTED_PIN_SEEKING
- return pinSeeking->GetTimeFormat(aFormat);
-}
-
-HRESULT
-AudioSinkFilter::IsUsingTimeFormat(const GUID* aFormat)
-{
- ENSURE_CONNECTED_PIN_SEEKING
- return pinSeeking->IsUsingTimeFormat(aFormat);
-}
-
-HRESULT
-AudioSinkFilter::SetTimeFormat(const GUID* aFormat)
-{
- ENSURE_CONNECTED_PIN_SEEKING
- return pinSeeking->SetTimeFormat(aFormat);
-}
-
-HRESULT
-AudioSinkFilter::GetDuration(LONGLONG* aDuration)
-{
- ENSURE_CONNECTED_PIN_SEEKING
- return pinSeeking->GetDuration(aDuration);
-}
-
-HRESULT
-AudioSinkFilter::GetStopPosition(LONGLONG* aStop)
-{
- ENSURE_CONNECTED_PIN_SEEKING
- return pinSeeking->GetStopPosition(aStop);
-}
-
-HRESULT
-AudioSinkFilter::GetCurrentPosition(LONGLONG* aCurrent)
-{
- ENSURE_CONNECTED_PIN_SEEKING
- return pinSeeking->GetCurrentPosition(aCurrent);
-}
-
-HRESULT
-AudioSinkFilter::ConvertTimeFormat(LONGLONG* aTarget,
- const GUID* aTargetFormat,
- LONGLONG aSource,
- const GUID* aSourceFormat)
-{
- ENSURE_CONNECTED_PIN_SEEKING
- return pinSeeking->ConvertTimeFormat(aTarget,
- aTargetFormat,
- aSource,
- aSourceFormat);
-}
-
-HRESULT
-AudioSinkFilter::SetPositions(LONGLONG* aCurrent,
- DWORD aCurrentFlags,
- LONGLONG* aStop,
- DWORD aStopFlags)
-{
- ENSURE_CONNECTED_PIN_SEEKING
- return pinSeeking->SetPositions(aCurrent,
- aCurrentFlags,
- aStop,
- aStopFlags);
-}
-
-HRESULT
-AudioSinkFilter::GetPositions(LONGLONG* aCurrent,
- LONGLONG* aStop)
-{
- ENSURE_CONNECTED_PIN_SEEKING
- return pinSeeking->GetPositions(aCurrent, aStop);
-}
-
-HRESULT
-AudioSinkFilter::GetAvailable(LONGLONG* aEarliest,
- LONGLONG* aLatest)
-{
- ENSURE_CONNECTED_PIN_SEEKING
- return pinSeeking->GetAvailable(aEarliest, aLatest);
-}
-
-HRESULT
-AudioSinkFilter::SetRate(double aRate)
-{
- ENSURE_CONNECTED_PIN_SEEKING
- return pinSeeking->SetRate(aRate);
-}
-
-HRESULT
-AudioSinkFilter::GetRate(double* aRate)
-{
- ENSURE_CONNECTED_PIN_SEEKING
- return pinSeeking->GetRate(aRate);
-}
-
-HRESULT
-AudioSinkFilter::GetPreroll(LONGLONG* aPreroll)
-{
- ENSURE_CONNECTED_PIN_SEEKING
- return pinSeeking->GetPreroll(aPreroll);
-}
-
-} // namespace mozilla
-
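
AudioSinkFilter::AddRef and Release above are the standard intrusive COM refcount in which the object deletes itself when the count drops to zero. A portable sketch of the same idiom using std::atomic follows; it is illustrative only, since real COM code uses InterlockedIncrement/InterlockedDecrement exactly as the removed code does, and the RefCounted name is hypothetical.

    #include <atomic>

    class RefCounted {
    public:
      unsigned long AddRef() { return ++mRefCnt; }

      unsigned long Release() {
        unsigned long newRefCnt = --mRefCnt;
        if (newRefCnt == 0) {
          delete this;  // the object owns its own lifetime, as in AudioSinkFilter
        }
        return newRefCnt;
      }

    protected:
      // Protected so callers must go through Release() rather than delete.
      virtual ~RefCounted() = default;

    private:
      std::atomic<unsigned long> mRefCnt{0};
    };

    int main() {
      RefCounted* obj = new RefCounted();
      obj->AddRef();   // refcount 1
      obj->Release();  // refcount 0: the object deletes itself
      return 0;
    }
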
diff --git a/dom/media/directshow/AudioSinkFilter.h b/dom/media/directshow/AudioSinkFilter.h
deleted file mode 100644
index 85abdfccf..000000000
--- a/dom/media/directshow/AudioSinkFilter.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#if !defined(AudioSinkFilter_h_)
-#define AudioSinkFilter_h_
-
-#include "BaseFilter.h"
-#include "DirectShowUtils.h"
-#include "nsAutoPtr.h"
-#include "mozilla/RefPtr.h"
-
-namespace mozilla {
-
-class AudioSinkInputPin;
-class SampleSink;
-
-// Filter that acts as the end of the graph. Audio samples input into
-// this filter block the calling thread, and the calling thread is
-// unblocked when the decode thread extracts the sample. The samples
-// input into this filter are stored in the SampleSink, where the blocking
-// is implemented. The input pin owns the SampleSink.
-class AudioSinkFilter: public mozilla::media::BaseFilter,
- public IMediaSeeking
-{
-
-public:
- AudioSinkFilter(const wchar_t* aObjectName, HRESULT* aOutResult);
- virtual ~AudioSinkFilter();
-
- // Gets the input pin's sample sink.
- SampleSink* GetSampleSink();
-
- // IUnknown implementation.
- STDMETHODIMP QueryInterface(REFIID aIId, void **aInterface);
- STDMETHODIMP_(ULONG) AddRef();
- STDMETHODIMP_(ULONG) Release();
-
- // --------------------------------------------------------------------
- // CBaseFilter methods
- int GetPinCount ();
- mozilla::media::BasePin* GetPin ( IN int Index);
- STDMETHODIMP Pause ();
- STDMETHODIMP Stop ();
- STDMETHODIMP GetClassID ( OUT CLSID * pCLSID);
- STDMETHODIMP Run(REFERENCE_TIME tStart);
- // IMediaSeeking Methods...
-
- // We defer to SourceFilter, but we must expose the interface on
- // the output pins. Seeking commands come upstream from the renderers,
- // but they must be actioned at the source filters.
- STDMETHODIMP GetCapabilities(DWORD* aCapabilities);
- STDMETHODIMP CheckCapabilities(DWORD* aCapabilities);
- STDMETHODIMP IsFormatSupported(const GUID* aFormat);
- STDMETHODIMP QueryPreferredFormat(GUID* aFormat);
- STDMETHODIMP GetTimeFormat(GUID* aFormat);
- STDMETHODIMP IsUsingTimeFormat(const GUID* aFormat);
- STDMETHODIMP SetTimeFormat(const GUID* aFormat);
- STDMETHODIMP GetDuration(LONGLONG* pDuration);
- STDMETHODIMP GetStopPosition(LONGLONG* pStop);
- STDMETHODIMP GetCurrentPosition(LONGLONG* aCurrent);
- STDMETHODIMP ConvertTimeFormat(LONGLONG* aTarget,
- const GUID* aTargetFormat,
- LONGLONG aSource,
- const GUID* aSourceFormat);
- STDMETHODIMP SetPositions(LONGLONG* aCurrent,
- DWORD aCurrentFlags,
- LONGLONG* aStop,
- DWORD aStopFlags);
- STDMETHODIMP GetPositions(LONGLONG* aCurrent,
- LONGLONG* aStop);
- STDMETHODIMP GetAvailable(LONGLONG* aEarliest,
- LONGLONG* aLatest);
- STDMETHODIMP SetRate(double aRate);
- STDMETHODIMP GetRate(double* aRate);
- STDMETHODIMP GetPreroll(LONGLONG* aPreroll);
-
- // --------------------------------------------------------------------
- // class factory calls this
- static IUnknown * CreateInstance (IN LPUNKNOWN punk, OUT HRESULT * phr);
-
-private:
- CriticalSection mFilterCritSec;
-
- // Note: The input pin defers its refcounting to the sink filter, so when
- // the input pin is addrefed, what actually happens is the sink filter is
- // addrefed.
- nsAutoPtr<AudioSinkInputPin> mInputPin;
-};
-
-} // namespace mozilla
-
-#endif // AudioSinkFilter_h_
diff --git a/dom/media/directshow/AudioSinkInputPin.cpp b/dom/media/directshow/AudioSinkInputPin.cpp
deleted file mode 100644
index 85a6e3da3..000000000
--- a/dom/media/directshow/AudioSinkInputPin.cpp
+++ /dev/null
@@ -1,195 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include "AudioSinkInputPin.h"
-#include "AudioSinkFilter.h"
-#include "SampleSink.h"
-#include "mozilla/Logging.h"
-
-#include <wmsdkidl.h>
-
-using namespace mozilla::media;
-
-namespace mozilla {
-
-static LazyLogModule gDirectShowLog("DirectShowDecoder");
-#define LOG(...) MOZ_LOG(gDirectShowLog, mozilla::LogLevel::Debug, (__VA_ARGS__))
-
-AudioSinkInputPin::AudioSinkInputPin(wchar_t* aObjectName,
- AudioSinkFilter* aFilter,
- mozilla::CriticalSection* aLock,
- HRESULT* aOutResult)
- : BaseInputPin(aObjectName, aFilter, aLock, aOutResult, aObjectName),
- mSegmentStartTime(0)
-{
- MOZ_COUNT_CTOR(AudioSinkInputPin);
- mSampleSink = new SampleSink();
-}
-
-AudioSinkInputPin::~AudioSinkInputPin()
-{
- MOZ_COUNT_DTOR(AudioSinkInputPin);
-}
-
-HRESULT
-AudioSinkInputPin::GetMediaType(int aPosition, MediaType* aOutMediaType)
-{
- NS_ENSURE_TRUE(aPosition >= 0, E_INVALIDARG);
- NS_ENSURE_TRUE(aOutMediaType, E_POINTER);
-
- if (aPosition > 0) {
- return S_FALSE;
- }
-
- // Note: We set output as PCM, as IEEE_FLOAT only works when using the
- // MP3 decoder as an MFT, and we can't do that while using DirectShow.
- aOutMediaType->SetType(&MEDIATYPE_Audio);
- aOutMediaType->SetSubtype(&MEDIASUBTYPE_PCM);
- aOutMediaType->SetType(&FORMAT_WaveFormatEx);
- aOutMediaType->SetTemporalCompression(FALSE);
-
- return S_OK;
-}
-
-HRESULT
-AudioSinkInputPin::CheckMediaType(const MediaType* aMediaType)
-{
- if (!aMediaType) {
- return E_INVALIDARG;
- }
-
- GUID majorType = *aMediaType->Type();
- if (majorType != MEDIATYPE_Audio && majorType != WMMEDIATYPE_Audio) {
- return E_INVALIDARG;
- }
-
- if (*aMediaType->Subtype() != MEDIASUBTYPE_PCM) {
- return E_INVALIDARG;
- }
-
- if (*aMediaType->FormatType() != FORMAT_WaveFormatEx) {
- return E_INVALIDARG;
- }
-
- // We accept the media type, stash its layout format!
- WAVEFORMATEX* wfx = (WAVEFORMATEX*)(aMediaType->pbFormat);
- GetSampleSink()->SetAudioFormat(wfx);
-
- return S_OK;
-}
-
-AudioSinkFilter*
-AudioSinkInputPin::GetAudioSinkFilter()
-{
- return reinterpret_cast<AudioSinkFilter*>(mFilter);
-}
-
-SampleSink*
-AudioSinkInputPin::GetSampleSink()
-{
- return mSampleSink;
-}
-
-HRESULT
-AudioSinkInputPin::SetAbsoluteMediaTime(IMediaSample* aSample)
-{
- HRESULT hr;
- REFERENCE_TIME start = 0, end = 0;
- hr = aSample->GetTime(&start, &end);
- NS_ENSURE_TRUE(SUCCEEDED(hr), E_FAIL);
- {
- CriticalSectionAutoEnter lock(*mLock);
- start += mSegmentStartTime;
- end += mSegmentStartTime;
- }
- hr = aSample->SetMediaTime(&start, &end);
- NS_ENSURE_TRUE(SUCCEEDED(hr), E_FAIL);
- return S_OK;
-}
-
-HRESULT
-AudioSinkInputPin::Receive(IMediaSample* aSample )
-{
- HRESULT hr;
- NS_ENSURE_TRUE(aSample, E_POINTER);
-
- hr = BaseInputPin::Receive(aSample);
- if (SUCCEEDED(hr) && hr != S_FALSE) { // S_FALSE == flushing
- // Set the timestamp of the sample after being adjusted for
- // seeking/segments in the "media time" attribute. When we seek,
- // DirectShow starts a new "segment", and starts labeling samples
- // from time=0 again, so we need to correct for this to get the
- // actual timestamps after seeking.
- hr = SetAbsoluteMediaTime(aSample);
- NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
- hr = GetSampleSink()->Receive(aSample);
- NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
- }
- return S_OK;
-}
-
-already_AddRefed<IMediaSeeking>
-AudioSinkInputPin::GetConnectedPinSeeking()
-{
- RefPtr<IPin> peer = GetConnected();
- if (!peer)
- return nullptr;
- RefPtr<IMediaSeeking> seeking;
- peer->QueryInterface(static_cast<IMediaSeeking**>(getter_AddRefs(seeking)));
- return seeking.forget();
-}
-
-HRESULT
-AudioSinkInputPin::BeginFlush()
-{
- HRESULT hr = media::BaseInputPin::BeginFlush();
- NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
-
- GetSampleSink()->Flush();
-
- return S_OK;
-}
-
-HRESULT
-AudioSinkInputPin::EndFlush()
-{
- HRESULT hr = media::BaseInputPin::EndFlush();
- NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
-
- // Reset the EOS flag, so that if we're called after a seek we still work.
- GetSampleSink()->Reset();
-
- return S_OK;
-}
-
-HRESULT
-AudioSinkInputPin::EndOfStream(void)
-{
- HRESULT hr = media::BaseInputPin::EndOfStream();
- if (FAILED(hr) || hr == S_FALSE) {
- // Pin is still flushing.
- return hr;
- }
- GetSampleSink()->SetEOS();
-
- return S_OK;
-}
-
-
-HRESULT
-AudioSinkInputPin::NewSegment(REFERENCE_TIME tStart,
- REFERENCE_TIME tStop,
- double dRate)
-{
- CriticalSectionAutoEnter lock(*mLock);
- // Record the start time of the new segment, so that we can store the
- // correct absolute timestamp in the "media time" of each incoming sample.
- mSegmentStartTime = tStart;
- return S_OK;
-}
-
-} // namespace mozilla
-
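
The segment bookkeeping in SetAbsoluteMediaTime and NewSegment above reduces to one piece of arithmetic: DirectShow restarts sample timestamps at zero for every new segment (for example, after each seek), so adding the segment's start time back in recovers the sample's absolute position in the stream. Here is a standalone sketch of that adjustment, with hypothetical types in place of IMediaSample.

    #include <cstdint>
    #include <cstdio>

    // 100-nanosecond units, like DirectShow's REFERENCE_TIME.
    using RefTime = int64_t;

    struct Sample {
      RefTime start;  // relative to the current segment
      RefTime end;
    };

    // Convert segment-relative sample times into absolute stream times,
    // mirroring what AudioSinkInputPin::SetAbsoluteMediaTime() does.
    static void ToAbsoluteTime(Sample& aSample, RefTime aSegmentStart) {
      aSample.start += aSegmentStart;
      aSample.end += aSegmentStart;
    }

    int main() {
      // Seek to 5 seconds: DirectShow opens a new segment starting there and
      // labels the first decoded sample [0, ~26ms) within that segment.
      const RefTime kSegmentStart = 5 * 10000000LL;  // 5s in 100ns units
      Sample sample{ 0, 260000 };                    // one ~26ms MP3 frame
      ToAbsoluteTime(sample, kSegmentStart);
      std::printf("absolute start = %lld (100ns units)\n",
                  static_cast<long long>(sample.start));  // prints 50000000
      return 0;
    }
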
diff --git a/dom/media/directshow/AudioSinkInputPin.h b/dom/media/directshow/AudioSinkInputPin.h
deleted file mode 100644
index 80503c641..000000000
--- a/dom/media/directshow/AudioSinkInputPin.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#if !defined(AudioSinkInputPin_h_)
-#define AudioSinkInputPin_h_
-
-#include "BaseInputPin.h"
-#include "DirectShowUtils.h"
-#include "mozilla/RefPtr.h"
-#include "nsAutoPtr.h"
-
-namespace mozilla {
-
-namespace media {
- class MediaType;
-}
-
-class AudioSinkFilter;
-class SampleSink;
-
-
-// Input pin for capturing audio output of a DirectShow filter graph.
-// This is the input pin for the AudioSinkFilter.
-class AudioSinkInputPin: public mozilla::media::BaseInputPin
-{
-public:
- AudioSinkInputPin(wchar_t* aObjectName,
- AudioSinkFilter* aFilter,
- mozilla::CriticalSection* aLock,
- HRESULT* aOutResult);
- virtual ~AudioSinkInputPin();
-
- HRESULT GetMediaType (IN int iPos, OUT mozilla::media::MediaType * pmt);
- HRESULT CheckMediaType (IN const mozilla::media::MediaType * pmt);
- STDMETHODIMP Receive (IN IMediaSample *);
- STDMETHODIMP BeginFlush() override;
- STDMETHODIMP EndFlush() override;
-
- // Called when we start decoding a new segment, which happens directly after
- // a seek. This captures the segment's start time. Samples decoded by the
- // MP3 decoder have their timestamps offset from the segment start time.
- // Storing the segment start time enables us to set each sample's MediaTime
- // as an offset in the stream relative to the start of the stream, rather
- // than the start of the segment, i.e. its absolute time in the stream.
- STDMETHODIMP NewSegment(REFERENCE_TIME tStart,
- REFERENCE_TIME tStop,
- double dRate) override;
-
- STDMETHODIMP EndOfStream() override;
-
- // Returns the IMediaSeeking interface of the connected output pin.
- // We forward seeking requests upstream from the sink to the source
- // filters.
- already_AddRefed<IMediaSeeking> GetConnectedPinSeeking();
-
- SampleSink* GetSampleSink();
-
-private:
- AudioSinkFilter* GetAudioSinkFilter();
-
- // Sets the media time on the media sample to its absolute stream time
- // by adding the segment start time.
- HRESULT SetAbsoluteMediaTime(IMediaSample* aSample);
-
- nsAutoPtr<SampleSink> mSampleSink;
-
- // Synchronized by the filter lock; BaseInputPin::mLock.
- REFERENCE_TIME mSegmentStartTime;
-};
-
-} // namespace mozilla
-
-#endif // AudioSinkInputPin_h_
diff --git a/dom/media/directshow/DirectShowDecoder.cpp b/dom/media/directshow/DirectShowDecoder.cpp
deleted file mode 100644
index da68b4daa..000000000
--- a/dom/media/directshow/DirectShowDecoder.cpp
+++ /dev/null
@@ -1,65 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include "DirectShowDecoder.h"
-#include "DirectShowReader.h"
-#include "DirectShowUtils.h"
-#include "MediaDecoderStateMachine.h"
-#include "mozilla/Preferences.h"
-#include "mozilla/WindowsVersion.h"
-
-namespace mozilla {
-
-MediaDecoderStateMachine* DirectShowDecoder::CreateStateMachine()
-{
- return new MediaDecoderStateMachine(this, new DirectShowReader(this));
-}
-
-/* static */
-bool
-DirectShowDecoder::GetSupportedCodecs(const nsACString& aType,
- char const *const ** aCodecList)
-{
- if (!IsEnabled()) {
- return false;
- }
-
- static char const *const mp3AudioCodecs[] = {
- "mp3",
- nullptr
- };
- if (aType.EqualsASCII("audio/mpeg") ||
- aType.EqualsASCII("audio/mp3")) {
- if (aCodecList) {
- *aCodecList = mp3AudioCodecs;
- }
- return true;
- }
-
- return false;
-}
-
-/* static */
-bool
-DirectShowDecoder::IsEnabled()
-{
- return CanDecodeMP3UsingDirectShow() &&
- Preferences::GetBool("media.directshow.enabled");
-}
-
-DirectShowDecoder::DirectShowDecoder(MediaDecoderOwner* aOwner)
- : MediaDecoder(aOwner)
-{
- MOZ_COUNT_CTOR(DirectShowDecoder);
-}
-
-DirectShowDecoder::~DirectShowDecoder()
-{
- MOZ_COUNT_DTOR(DirectShowDecoder);
-}
-
-} // namespace mozilla
-
diff --git a/dom/media/directshow/DirectShowDecoder.h b/dom/media/directshow/DirectShowDecoder.h
deleted file mode 100644
index c4d371fbf..000000000
--- a/dom/media/directshow/DirectShowDecoder.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#if !defined(DirectShowDecoder_h_)
-#define DirectShowDecoder_h_
-
-#include "MediaDecoder.h"
-
-namespace mozilla {
-
-// Decoder that uses DirectShow to playback MP3 files only.
-class DirectShowDecoder : public MediaDecoder
-{
-public:
-
- explicit DirectShowDecoder(MediaDecoderOwner* aOwner);
- virtual ~DirectShowDecoder();
-
- MediaDecoder* Clone(MediaDecoderOwner* aOwner) override {
- if (!IsEnabled()) {
- return nullptr;
- }
- return new DirectShowDecoder(aOwner);
- }
-
- MediaDecoderStateMachine* CreateStateMachine() override;
-
- // Returns true if aType is a MIME type that we render with the
- // DirectShow backend. If aCodecList is non-null,
- // it is filled with a (static const) null-terminated list of strings
- // denoting the codecs we'll play back. Note that playback is strictly
- // limited to MP3 only.
- static bool GetSupportedCodecs(const nsACString& aType,
- char const *const ** aCodecList);
-
- // Returns true if the DirectShow backend is preffed on.
- static bool IsEnabled();
-};
-
-} // namespace mozilla
-
-#endif
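
To make the GetSupportedCodecs contract concrete (a boolean result plus an optional pointer to a static, null-terminated string array), here is a rough sketch of how a caller walks such a list. The free function below is a simplified stand-in for the static method declared above, not the actual Gecko call site.

    #include <cstdio>
    #include <cstring>

    // Returns true if the MIME type is supported and, if aCodecList is
    // non-null, points it at a static null-terminated array of codec names.
    static bool GetSupportedCodecs(const char* aType,
                                   const char* const** aCodecList) {
      static const char* const kMp3Codecs[] = { "mp3", nullptr };
      if (std::strcmp(aType, "audio/mpeg") == 0 ||
          std::strcmp(aType, "audio/mp3") == 0) {
        if (aCodecList) {
          *aCodecList = kMp3Codecs;
        }
        return true;
      }
      return false;
    }

    int main() {
      const char* const* codecs = nullptr;
      if (GetSupportedCodecs("audio/mpeg", &codecs)) {
        for (const char* const* c = codecs; *c; ++c) {
          std::printf("supported codec: %s\n", *c);
        }
      }
      return 0;
    }
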
diff --git a/dom/media/directshow/DirectShowReader.cpp b/dom/media/directshow/DirectShowReader.cpp
deleted file mode 100644
index cacf6f8de..000000000
--- a/dom/media/directshow/DirectShowReader.cpp
+++ /dev/null
@@ -1,360 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim: set ts=8 sts=2 et sw=2 tw=80: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include "DirectShowReader.h"
-#include "MediaDecoderReader.h"
-#include "mozilla/RefPtr.h"
-#include "DirectShowUtils.h"
-#include "AudioSinkFilter.h"
-#include "SourceFilter.h"
-#include "SampleSink.h"
-#include "VideoUtils.h"
-
-using namespace mozilla::media;
-
-namespace mozilla {
-
-// Windows XP's MP3 decoder filter. This is available on XP only; on Vista
-// and later we can use the DMO Wrapper filter and the MP3 decoder DMO.
-const GUID DirectShowReader::CLSID_MPEG_LAYER_3_DECODER_FILTER =
-{ 0x38BE3000, 0xDBF4, 0x11D0, {0x86, 0x0E, 0x00, 0xA0, 0x24, 0xCF, 0xEF, 0x6D} };
-
-
-static LazyLogModule gDirectShowLog("DirectShowDecoder");
-#define LOG(...) MOZ_LOG(gDirectShowLog, mozilla::LogLevel::Debug, (__VA_ARGS__))
-
-DirectShowReader::DirectShowReader(AbstractMediaDecoder* aDecoder)
- : MediaDecoderReader(aDecoder),
- mMP3FrameParser(aDecoder->GetResource()->GetLength()),
-#ifdef DIRECTSHOW_REGISTER_GRAPH
- mRotRegister(0),
-#endif
- mNumChannels(0),
- mAudioRate(0),
- mBytesPerSample(0)
-{
- MOZ_ASSERT(NS_IsMainThread(), "Must be on main thread.");
- MOZ_COUNT_CTOR(DirectShowReader);
-}
-
-DirectShowReader::~DirectShowReader()
-{
- MOZ_ASSERT(NS_IsMainThread(), "Must be on main thread.");
- MOZ_COUNT_DTOR(DirectShowReader);
-#ifdef DIRECTSHOW_REGISTER_GRAPH
- if (mRotRegister) {
- RemoveGraphFromRunningObjectTable(mRotRegister);
- }
-#endif
-}
-
-// Try to parse the MP3 stream to make sure this is indeed an MP3, get the
-// estimated duration of the stream, and find the offset of the actual MP3
-// frames in the stream, as DirectShow doesn't like large ID3 sections.
-static nsresult
-ParseMP3Headers(MP3FrameParser *aParser, MediaResource *aResource)
-{
- const uint32_t MAX_READ_SIZE = 4096;
-
- uint64_t offset = 0;
- while (aParser->NeedsData() && !aParser->ParsedHeaders()) {
- uint32_t bytesRead;
- char buffer[MAX_READ_SIZE];
- nsresult rv = aResource->ReadAt(offset, buffer,
- MAX_READ_SIZE, &bytesRead);
- NS_ENSURE_SUCCESS(rv, rv);
-
- if (!bytesRead) {
- // End of stream.
- return NS_ERROR_FAILURE;
- }
-
- aParser->Parse(reinterpret_cast<uint8_t*>(buffer), bytesRead, offset);
- offset += bytesRead;
- }
-
- return aParser->IsMP3() ? NS_OK : NS_ERROR_FAILURE;
-}
-
-nsresult
-DirectShowReader::ReadMetadata(MediaInfo* aInfo,
- MetadataTags** aTags)
-{
- MOZ_ASSERT(OnTaskQueue());
- HRESULT hr;
- nsresult rv;
-
- // Create the filter graph, reference it by the GraphBuilder interface,
- // to make graph building more convenient.
- hr = CoCreateInstance(CLSID_FilterGraph,
- nullptr,
- CLSCTX_INPROC_SERVER,
- IID_IGraphBuilder,
- reinterpret_cast<void**>(static_cast<IGraphBuilder**>(getter_AddRefs(mGraph))));
- NS_ENSURE_TRUE(SUCCEEDED(hr) && mGraph, NS_ERROR_FAILURE);
-
- rv = ParseMP3Headers(&mMP3FrameParser, mDecoder->GetResource());
- NS_ENSURE_SUCCESS(rv, rv);
-
- #ifdef DIRECTSHOW_REGISTER_GRAPH
- hr = AddGraphToRunningObjectTable(mGraph, &mRotRegister);
- NS_ENSURE_TRUE(SUCCEEDED(hr), NS_ERROR_FAILURE);
- #endif
-
- // Extract the interface pointers we'll need from the filter graph.
- hr = mGraph->QueryInterface(static_cast<IMediaControl**>(getter_AddRefs(mControl)));
- NS_ENSURE_TRUE(SUCCEEDED(hr) && mControl, NS_ERROR_FAILURE);
-
- hr = mGraph->QueryInterface(static_cast<IMediaSeeking**>(getter_AddRefs(mMediaSeeking)));
- NS_ENSURE_TRUE(SUCCEEDED(hr) && mMediaSeeking, NS_ERROR_FAILURE);
-
- // Build the graph. Create the filters we need, and connect them. We
- // build the entire graph ourselves to prevent other decoders installed
- // on the system from being created and used.
-
- // Our source filter, which wraps the MediaResource.
- mSourceFilter = new SourceFilter(MEDIATYPE_Stream, MEDIASUBTYPE_MPEG1Audio);
- NS_ENSURE_TRUE(mSourceFilter, NS_ERROR_FAILURE);
-
- rv = mSourceFilter->Init(mDecoder->GetResource(), mMP3FrameParser.GetMP3Offset());
- NS_ENSURE_SUCCESS(rv, rv);
-
- hr = mGraph->AddFilter(mSourceFilter, L"MozillaDirectShowSource");
- NS_ENSURE_TRUE(SUCCEEDED(hr), NS_ERROR_FAILURE);
-
- // The MPEG demuxer.
- RefPtr<IBaseFilter> demuxer;
- hr = CreateAndAddFilter(mGraph,
- CLSID_MPEG1Splitter,
- L"MPEG1Splitter",
- getter_AddRefs(demuxer));
- NS_ENSURE_TRUE(SUCCEEDED(hr), NS_ERROR_FAILURE);
-
- // Platform MP3 decoder.
- RefPtr<IBaseFilter> decoder;
- // First, try to create the MP3 decoder filter that ships with WinXP
- // directly. This filter doesn't normally exist on later versions of
- // Windows.
- hr = CreateAndAddFilter(mGraph,
- CLSID_MPEG_LAYER_3_DECODER_FILTER,
- L"MPEG Layer 3 Decoder",
- getter_AddRefs(decoder));
- if (FAILED(hr)) {
- // Failed to create MP3 decoder filter. Try to instantiate
- // the MP3 decoder DMO.
- hr = AddMP3DMOWrapperFilter(mGraph, getter_AddRefs(decoder));
- NS_ENSURE_TRUE(SUCCEEDED(hr), NS_ERROR_FAILURE);
- }
-
- // Sink, captures audio samples and inserts them into our pipeline.
- static const wchar_t* AudioSinkFilterName = L"MozAudioSinkFilter";
- mAudioSinkFilter = new AudioSinkFilter(AudioSinkFilterName, &hr);
- NS_ENSURE_TRUE(mAudioSinkFilter && SUCCEEDED(hr), NS_ERROR_FAILURE);
- hr = mGraph->AddFilter(mAudioSinkFilter, AudioSinkFilterName);
- NS_ENSURE_TRUE(SUCCEEDED(hr), NS_ERROR_FAILURE);
-
- // Join the filters.
- hr = ConnectFilters(mGraph, mSourceFilter, demuxer);
- NS_ENSURE_TRUE(SUCCEEDED(hr), NS_ERROR_FAILURE);
-
- hr = ConnectFilters(mGraph, demuxer, decoder);
- NS_ENSURE_TRUE(SUCCEEDED(hr), NS_ERROR_FAILURE);
-
- hr = ConnectFilters(mGraph, decoder, mAudioSinkFilter);
- NS_ENSURE_TRUE(SUCCEEDED(hr), NS_ERROR_FAILURE);
-
- WAVEFORMATEX format;
- mAudioSinkFilter->GetSampleSink()->GetAudioFormat(&format);
- NS_ENSURE_TRUE(format.wFormatTag == WAVE_FORMAT_PCM, NS_ERROR_FAILURE);
-
- mInfo.mAudio.mChannels = mNumChannels = format.nChannels;
- mInfo.mAudio.mRate = mAudioRate = format.nSamplesPerSec;
- mInfo.mAudio.mBitDepth = format.wBitsPerSample;
- mBytesPerSample = format.wBitsPerSample / 8;
-
- // Begin decoding!
- hr = mControl->Run();
- NS_ENSURE_TRUE(SUCCEEDED(hr), NS_ERROR_FAILURE);
-
- DWORD seekCaps = 0;
- hr = mMediaSeeking->GetCapabilities(&seekCaps);
- mInfo.mMediaSeekable = SUCCEEDED(hr) && (AM_SEEKING_CanSeekAbsolute & seekCaps);
-
- int64_t duration = mMP3FrameParser.GetDuration();
- if (SUCCEEDED(hr)) {
- mInfo.mMetadataDuration.emplace(TimeUnit::FromMicroseconds(duration));
- }
-
- LOG("Successfully initialized DirectShow MP3 decoder.");
- LOG("Channels=%u Hz=%u duration=%lld bytesPerSample=%d",
- mInfo.mAudio.mChannels,
- mInfo.mAudio.mRate,
- RefTimeToUsecs(duration),
- mBytesPerSample);
-
- *aInfo = mInfo;
- // Note: The SourceFilter strips ID3v2 tags out of the stream.
- *aTags = nullptr;
-
- return NS_OK;
-}
-
-inline float
-UnsignedByteToAudioSample(uint8_t aValue)
-{
- return aValue * (2.0f / UINT8_MAX) - 1.0f;
-}
-
-bool
-DirectShowReader::Finish(HRESULT aStatus)
-{
- MOZ_ASSERT(OnTaskQueue());
-
- LOG("DirectShowReader::Finish(0x%x)", aStatus);
- // Notify the filter graph of end of stream.
- RefPtr<IMediaEventSink> eventSink;
- HRESULT hr = mGraph->QueryInterface(static_cast<IMediaEventSink**>(getter_AddRefs(eventSink)));
- if (SUCCEEDED(hr) && eventSink) {
- eventSink->Notify(EC_COMPLETE, aStatus, 0);
- }
- return false;
-}
-
-class DirectShowCopy
-{
-public:
- DirectShowCopy(uint8_t *aSource, uint32_t aBytesPerSample,
- uint32_t aSamples, uint32_t aChannels)
- : mSource(aSource)
- , mBytesPerSample(aBytesPerSample)
- , mSamples(aSamples)
- , mChannels(aChannels)
- , mNextSample(0)
- { }
-
- uint32_t operator()(AudioDataValue *aBuffer, uint32_t aSamples)
- {
- uint32_t maxSamples = std::min(aSamples, mSamples - mNextSample);
- uint32_t frames = maxSamples / mChannels;
- size_t byteOffset = mNextSample * mBytesPerSample;
- if (mBytesPerSample == 1) {
- for (uint32_t i = 0; i < maxSamples; ++i) {
- uint8_t *sample = mSource + byteOffset;
- aBuffer[i] = UnsignedByteToAudioSample(*sample);
- byteOffset += mBytesPerSample;
- }
- } else if (mBytesPerSample == 2) {
- for (uint32_t i = 0; i < maxSamples; ++i) {
- int16_t *sample = reinterpret_cast<int16_t *>(mSource + byteOffset);
- aBuffer[i] = AudioSampleToFloat(*sample);
- byteOffset += mBytesPerSample;
- }
- }
- mNextSample += maxSamples;
- return frames;
- }
-
-private:
- uint8_t * const mSource;
- const uint32_t mBytesPerSample;
- const uint32_t mSamples;
- const uint32_t mChannels;
- uint32_t mNextSample;
-};
-
-bool
-DirectShowReader::DecodeAudioData()
-{
- MOZ_ASSERT(OnTaskQueue());
- HRESULT hr;
-
- SampleSink* sink = mAudioSinkFilter->GetSampleSink();
- if (sink->AtEOS()) {
- // End of stream.
- return Finish(S_OK);
- }
-
- // Get the next chunk of audio samples. This blocks until the sample
- // arrives, or an error occurs (like the stream is shut down).
- RefPtr<IMediaSample> sample;
- hr = sink->Extract(sample);
- if (FAILED(hr) || hr == S_FALSE) {
- return Finish(hr);
- }
-
- int64_t start = 0, end = 0;
- sample->GetMediaTime(&start, &end);
- LOG("DirectShowReader::DecodeAudioData [%4.2lf-%4.2lf]",
- RefTimeToSeconds(start),
- RefTimeToSeconds(end));
-
- LONG length = sample->GetActualDataLength();
- LONG numSamples = length / mBytesPerSample;
- LONG numFrames = length / mBytesPerSample / mNumChannels;
-
- BYTE* data = nullptr;
- hr = sample->GetPointer(&data);
- NS_ENSURE_TRUE(SUCCEEDED(hr), Finish(hr));
-
- mAudioCompactor.Push(mDecoder->GetResource()->Tell(),
- RefTimeToUsecs(start),
- mInfo.mAudio.mRate,
- numFrames,
- mNumChannels,
- DirectShowCopy(reinterpret_cast<uint8_t *>(data),
- mBytesPerSample,
- numSamples,
- mNumChannels));
- return true;
-}
-
-bool
-DirectShowReader::DecodeVideoFrame(bool &aKeyframeSkip,
- int64_t aTimeThreshold)
-{
- MOZ_ASSERT(OnTaskQueue());
- return false;
-}
-
-RefPtr<MediaDecoderReader::SeekPromise>
-DirectShowReader::Seek(SeekTarget aTarget, int64_t aEndTime)
-{
- nsresult res = SeekInternal(aTarget.GetTime().ToMicroseconds());
- if (NS_FAILED(res)) {
- return SeekPromise::CreateAndReject(res, __func__);
- } else {
- return SeekPromise::CreateAndResolve(aTarget.GetTime(), __func__);
- }
-}
-
-nsresult
-DirectShowReader::SeekInternal(int64_t aTargetUs)
-{
- HRESULT hr;
- MOZ_ASSERT(OnTaskQueue());
-
- LOG("DirectShowReader::Seek() target=%lld", aTargetUs);
-
- hr = mControl->Pause();
- NS_ENSURE_TRUE(SUCCEEDED(hr), NS_ERROR_FAILURE);
-
- nsresult rv = ResetDecode();
- NS_ENSURE_SUCCESS(rv, rv);
-
- LONGLONG seekPosition = UsecsToRefTime(aTargetUs);
- hr = mMediaSeeking->SetPositions(&seekPosition,
- AM_SEEKING_AbsolutePositioning,
- nullptr,
- AM_SEEKING_NoPositioning);
- NS_ENSURE_TRUE(SUCCEEDED(hr), NS_ERROR_FAILURE);
-
- hr = mControl->Run();
- NS_ENSURE_TRUE(SUCCEEDED(hr), NS_ERROR_FAILURE);
-
- return NS_OK;
-}
-
-} // namespace mozilla
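
The DirectShowCopy functor above does the sample conversion the audio pipeline needs: 8-bit PCM is unsigned (silence at 128) while 16-bit PCM is signed (silence at 0), and both are mapped onto floats in roughly [-1.0, 1.0]. A minimal sketch of the two mappings follows; the 8-bit case matches UnsignedByteToAudioSample above, and the 16-bit case uses the conventional divide-by-32768 scaling assumed here for AudioSampleToFloat.

    #include <cstdint>
    #include <cstdio>

    // 8-bit PCM is unsigned: 0..255 with silence at 128.
    static float UnsignedByteToAudioSample(uint8_t aValue) {
      return aValue * (2.0f / UINT8_MAX) - 1.0f;
    }

    // 16-bit PCM is signed: -32768..32767 with silence at 0.
    static float Int16ToAudioSample(int16_t aValue) {
      return aValue / 32768.0f;
    }

    int main() {
      std::printf("u8    0    -> % .4f\n", UnsignedByteToAudioSample(0));    // -1.0000
      std::printf("u8    128  -> % .4f\n", UnsignedByteToAudioSample(128));  //  0.0039
      std::printf("u8    255  -> % .4f\n", UnsignedByteToAudioSample(255));  //  1.0000
      std::printf("s16 -32768 -> % .4f\n", Int16ToAudioSample(-32768));      // -1.0000
      std::printf("s16  32767 -> % .4f\n", Int16ToAudioSample(32767));       //  1.0000
      return 0;
    }
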
diff --git a/dom/media/directshow/DirectShowReader.h b/dom/media/directshow/DirectShowReader.h
deleted file mode 100644
index e1326d416..000000000
--- a/dom/media/directshow/DirectShowReader.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#if !defined(DirectShowReader_h_)
-#define DirectShowReader_h_
-
-#include "windows.h" // HRESULT, DWORD
-#include "MediaDecoderReader.h"
-#include "MediaResource.h"
-#include "mozilla/RefPtr.h"
-#include "MP3FrameParser.h"
-
-// Add the graph to the Running Object Table so that we can connect
-// to this graph with GraphEdit/GraphStudio. Note: on Vista and up you must
-// also regsvr32 proppage.dll from the Windows SDK.
-// See: http://msdn.microsoft.com/en-us/library/ms787252(VS.85).aspx
-// #define DIRECTSHOW_REGISTER_GRAPH
-
-struct IGraphBuilder;
-struct IMediaControl;
-struct IMediaSeeking;
-
-namespace mozilla {
-
-class AudioSinkFilter;
-class SourceFilter;
-
-// Decoder backend for decoding MP3 using DirectShow. DirectShow operates as
-// a filter graph. The basic design of the DirectShowReader is that we have
-// a SourceFilter that wraps the MediaResource that connects to the
-// MP3 decoder filter. The MP3 decoder filter "pulls" data as it requires it
-// downstream on its own thread. When the MP3 decoder has produced a block of
-// decoded samples, its thread calls downstream into our AudioSinkFilter,
-// passing the decoded buffer in. The AudioSinkFilter inserts the samples into
-// a SampleSink object. The SampleSink blocks the MP3 decoder's thread until
-// the decode thread calls DecodeAudioData(), whereupon the SampleSink
-// releases the decoded samples to the decode thread, and unblocks the MP3
-// decoder's thread. The MP3 decoder can then request more data from the
-// SourceFilter, and decode more data. If the decode thread calls
-// DecodeAudioData() and there's no decoded samples waiting to be extracted
-// in the SampleSink, the SampleSink blocks the decode thread until the MP3
-// decoder produces a decoded sample.
-class DirectShowReader : public MediaDecoderReader
-{
-public:
- DirectShowReader(AbstractMediaDecoder* aDecoder);
-
- virtual ~DirectShowReader();
-
- bool DecodeAudioData() override;
- bool DecodeVideoFrame(bool &aKeyframeSkip,
- int64_t aTimeThreshold) override;
-
- nsresult ReadMetadata(MediaInfo* aInfo,
- MetadataTags** aTags) override;
-
- RefPtr<SeekPromise>
- Seek(SeekTarget aTarget, int64_t aEndTime) override;
-
- static const GUID CLSID_MPEG_LAYER_3_DECODER_FILTER;
-
-private:
- // Notifies the filter graph that playback is complete. aStatus is
- // the code to send to the filter graph. Always returns false, so
- // that we can just "return Finish()" from DecodeAudioData().
- bool Finish(HRESULT aStatus);
-
- nsresult SeekInternal(int64_t aTime);
-
- // DirectShow filter graph, and associated playback and seeking
- // control interfaces.
- RefPtr<IGraphBuilder> mGraph;
- RefPtr<IMediaControl> mControl;
- RefPtr<IMediaSeeking> mMediaSeeking;
-
- // Wraps the MediaResource, and feeds undecoded data into the filter graph.
- RefPtr<SourceFilter> mSourceFilter;
-
- // Sits at the end of the graph, removing decoded samples from the graph.
- // The graph will block while this is blocked, i.e. it will pause decoding.
- RefPtr<AudioSinkFilter> mAudioSinkFilter;
-
- // Some MP3s are variable bitrate, so DirectShow's duration estimate
- // can be based on the wrong bitrate. We parse the MP3 frames ourselves
- // to get a more accurate estimate of the duration.
- MP3FrameParser mMP3FrameParser;
-
-#ifdef DIRECTSHOW_REGISTER_GRAPH
- // Used to add/remove the filter graph to the Running Object Table. You can
- // connect GraphEdit/GraphStudio to the graph to observe and/or debug its
- // topology and state.
- DWORD mRotRegister;
-#endif
-
- // Number of channels in the audio stream.
- uint32_t mNumChannels;
-
- // Samples per second in the audio stream.
- uint32_t mAudioRate;
-
- // Number of bytes per sample. Can be either 1 or 2.
- uint32_t mBytesPerSample;
-};
-
-} // namespace mozilla
-
-#endif
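
The class comment above describes a single-slot rendezvous between two threads: the MP3 decoder's thread blocks until the reader extracts its sample, and the reader blocks until a sample arrives. Here is a compact sketch of that handoff using only the standard library; it is illustrative, the SingleSlot name is hypothetical, and unlike the real SampleSink it does not handle flushing or end-of-stream.

    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <optional>
    #include <thread>

    // A one-element "mailbox": Put() blocks while a value is pending,
    // Take() blocks until a value arrives.
    template <typename T>
    class SingleSlot {
    public:
      void Put(T aValue) {
        std::unique_lock<std::mutex> lock(mMutex);
        mCond.wait(lock, [this] { return !mValue.has_value(); });
        mValue = std::move(aValue);
        mCond.notify_all();
      }

      T Take() {
        std::unique_lock<std::mutex> lock(mMutex);
        mCond.wait(lock, [this] { return mValue.has_value(); });
        T out = std::move(*mValue);
        mValue.reset();
        mCond.notify_all();
        return out;
      }

    private:
      std::mutex mMutex;
      std::condition_variable mCond;
      std::optional<T> mValue;
    };

    int main() {
      SingleSlot<int> sink;
      std::thread producer([&] {
        for (int i = 0; i < 3; ++i) {
          sink.Put(i);  // blocks until the consumer has emptied the slot
        }
      });
      for (int i = 0; i < 3; ++i) {
        std::printf("got sample %d\n", sink.Take());
      }
      producer.join();
      return 0;
    }
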
diff --git a/dom/media/directshow/DirectShowUtils.cpp b/dom/media/directshow/DirectShowUtils.cpp
deleted file mode 100644
index b2afa7528..000000000
--- a/dom/media/directshow/DirectShowUtils.cpp
+++ /dev/null
@@ -1,369 +0,0 @@
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include "DirectShowUtils.h"
-#include "dmodshow.h"
-#include "wmcodecdsp.h"
-#include "dmoreg.h"
-#include "mozilla/ArrayUtils.h"
-#include "mozilla/RefPtr.h"
-#include "nsPrintfCString.h"
-
-#define WARN(...) NS_WARNING(nsPrintfCString(__VA_ARGS__).get())
-
-namespace mozilla {
-
-// Create a table which maps GUIDs to a string representation of the GUID.
-// This is useful for debugging purposes, for logging the GUIDs of media types.
-// This is only available when logging is enabled, i.e. not in release builds.
-struct GuidToName {
- const char* name;
- const GUID guid;
-};
-
-#pragma push_macro("OUR_GUID_ENTRY")
-#undef OUR_GUID_ENTRY
-#define OUR_GUID_ENTRY(name, l, w1, w2, b1, b2, b3, b4, b5, b6, b7, b8) \
- { #name, {l, w1, w2, {b1, b2, b3, b4, b5, b6, b7, b8}} },
-
-static const GuidToName GuidToNameTable[] = {
-#include <uuids.h>
-};
-
-#pragma pop_macro("OUR_GUID_ENTRY")
-
-const char*
-GetDirectShowGuidName(const GUID& aGuid)
-{
- const size_t len = ArrayLength(GuidToNameTable);
- for (unsigned i = 0; i < len; i++) {
- if (IsEqualGUID(aGuid, GuidToNameTable[i].guid)) {
- return GuidToNameTable[i].name;
- }
- }
- return "Unknown";
-}
-
-void
-RemoveGraphFromRunningObjectTable(DWORD aRotRegister)
-{
- RefPtr<IRunningObjectTable> runningObjectTable;
- if (SUCCEEDED(GetRunningObjectTable(0, getter_AddRefs(runningObjectTable)))) {
- runningObjectTable->Revoke(aRotRegister);
- }
-}
-
-HRESULT
-AddGraphToRunningObjectTable(IUnknown *aUnkGraph, DWORD *aOutRotRegister)
-{
- HRESULT hr;
-
- RefPtr<IMoniker> moniker;
- RefPtr<IRunningObjectTable> runningObjectTable;
-
- hr = GetRunningObjectTable(0, getter_AddRefs(runningObjectTable));
- NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
-
- const size_t STRING_LENGTH = 256;
- WCHAR wsz[STRING_LENGTH];
-
- StringCchPrintfW(wsz,
- STRING_LENGTH,
- L"FilterGraph %08x pid %08x",
- (DWORD_PTR)aUnkGraph,
- GetCurrentProcessId());
-
- hr = CreateItemMoniker(L"!", wsz, getter_AddRefs(moniker));
- NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
-
- hr = runningObjectTable->Register(ROTFLAGS_REGISTRATIONKEEPSALIVE,
- aUnkGraph,
- moniker,
- aOutRotRegister);
- NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
-
- return S_OK;
-}
-
-const char*
-GetGraphNotifyString(long evCode)
-{
-#define CASE(x) case x: return #x
- switch(evCode) {
- CASE(EC_ACTIVATE); // A video window is being activated or deactivated.
- CASE(EC_BANDWIDTHCHANGE); // Not supported.
- CASE(EC_BUFFERING_DATA); // The graph is buffering data, or has stopped buffering data.
- CASE(EC_BUILT); // Sent by the Video Control when a graph has been built. Not forwarded to applications.
- CASE(EC_CLOCK_CHANGED); // The reference clock has changed.
- CASE(EC_CLOCK_UNSET); // The clock provider was disconnected.
- CASE(EC_CODECAPI_EVENT); // Sent by an encoder to signal an encoding event.
- CASE(EC_COMPLETE); // All data from a particular stream has been rendered.
- CASE(EC_CONTENTPROPERTY_CHANGED); // Not supported.
- CASE(EC_DEVICE_LOST); // A Plug and Play device was removed or has become available again.
- CASE(EC_DISPLAY_CHANGED); // The display mode has changed.
- CASE(EC_END_OF_SEGMENT); // The end of a segment has been reached.
- CASE(EC_EOS_SOON); // Not supported.
- CASE(EC_ERROR_STILLPLAYING); // An asynchronous command to run the graph has failed.
- CASE(EC_ERRORABORT); // An operation was aborted because of an error.
- CASE(EC_ERRORABORTEX); // An operation was aborted because of an error.
- CASE(EC_EXTDEVICE_MODE_CHANGE); // Not supported.
- CASE(EC_FILE_CLOSED); // The source file was closed because of an unexpected event.
- CASE(EC_FULLSCREEN_LOST); // The video renderer is switching out of full-screen mode.
- CASE(EC_GRAPH_CHANGED); // The filter graph has changed.
- CASE(EC_LENGTH_CHANGED); // The length of a source has changed.
- CASE(EC_LOADSTATUS); // Notifies the application of progress when opening a network file.
- CASE(EC_MARKER_HIT); // Not supported.
- CASE(EC_NEED_RESTART); // A filter is requesting that the graph be restarted.
- CASE(EC_NEW_PIN); // Not supported.
- CASE(EC_NOTIFY_WINDOW); // Notifies a filter of the video renderer's window.
- CASE(EC_OLE_EVENT); // A filter is passing a text string to the application.
- CASE(EC_OPENING_FILE); // The graph is opening a file, or has finished opening a file.
- CASE(EC_PALETTE_CHANGED); // The video palette has changed.
- CASE(EC_PAUSED); // A pause request has completed.
- CASE(EC_PLEASE_REOPEN); // The source file has changed.
- CASE(EC_PREPROCESS_COMPLETE); // Sent by the WM ASF Writer filter when it completes the pre-processing for multipass encoding.
- CASE(EC_PROCESSING_LATENCY); // Indicates the amount of time that a component is taking to process each sample.
- CASE(EC_QUALITY_CHANGE); // The graph is dropping samples, for quality control.
- //CASE(EC_RENDER_FINISHED); // Not supported.
- CASE(EC_REPAINT); // A video renderer requires a repaint.
- CASE(EC_SAMPLE_LATENCY); // Specifies how far behind schedule a component is for processing samples.
- //CASE(EC_SAMPLE_NEEDED); // Requests a new input sample from the Enhanced Video Renderer (EVR) filter.
- CASE(EC_SCRUB_TIME); // Specifies the time stamp for the most recent frame step.
- CASE(EC_SEGMENT_STARTED); // A new segment has started.
- CASE(EC_SHUTTING_DOWN); // The filter graph is shutting down, prior to being destroyed.
- CASE(EC_SNDDEV_IN_ERROR); // A device error has occurred in an audio capture filter.
- CASE(EC_SNDDEV_OUT_ERROR); // A device error has occurred in an audio renderer filter.
- CASE(EC_STARVATION); // A filter is not receiving enough data.
- CASE(EC_STATE_CHANGE); // The filter graph has changed state.
- CASE(EC_STATUS); // Contains two arbitrary status strings.
- CASE(EC_STEP_COMPLETE); // A filter performing frame stepping has stepped the specified number of frames.
- CASE(EC_STREAM_CONTROL_STARTED); // A stream-control start command has taken effect.
- CASE(EC_STREAM_CONTROL_STOPPED); // A stream-control stop command has taken effect.
- CASE(EC_STREAM_ERROR_STILLPLAYING); // An error has occurred in a stream. The stream is still playing.
- CASE(EC_STREAM_ERROR_STOPPED); // A stream has stopped because of an error.
- CASE(EC_TIMECODE_AVAILABLE); // Not supported.
- CASE(EC_UNBUILT); // Sent by the Video Control when a graph has been torn down. Not forwarded to applications.
- CASE(EC_USERABORT); // The user has terminated playback.
- CASE(EC_VIDEO_SIZE_CHANGED); // The native video size has changed.
- CASE(EC_VIDEOFRAMEREADY); // A video frame is ready for display.
- CASE(EC_VMR_RECONNECTION_FAILED); // Sent by the VMR-7 and the VMR-9 when it was unable to accept a dynamic format change request from the upstream decoder.
- CASE(EC_VMR_RENDERDEVICE_SET); // Sent when the VMR has selected its rendering mechanism.
- CASE(EC_VMR_SURFACE_FLIPPED); // Sent when the VMR-7's allocator presenter has called the DirectDraw Flip method on the surface being presented.
- CASE(EC_WINDOW_DESTROYED); // The video renderer was destroyed or removed from the graph.
- CASE(EC_WMT_EVENT); // Sent by the WM ASF Reader filter when it reads ASF files protected by digital rights management (DRM).
- CASE(EC_WMT_INDEX_EVENT); // Sent when an application uses the WM ASF Writer to index Windows Media Video files.
- CASE(S_OK); // Success.
- CASE(VFW_S_AUDIO_NOT_RENDERED); // Partial success; the audio was not rendered.
- CASE(VFW_S_DUPLICATE_NAME); // Success; the Filter Graph Manager modified a filter name to avoid duplication.
- CASE(VFW_S_PARTIAL_RENDER); // Partial success; some of the streams in this movie are in an unsupported format.
- CASE(VFW_S_VIDEO_NOT_RENDERED); // Partial success; the video was not rendered.
- CASE(E_ABORT); // Operation aborted.
- CASE(E_OUTOFMEMORY); // Insufficient memory.
- CASE(E_POINTER); // Null pointer argument.
- CASE(VFW_E_CANNOT_CONNECT); // No combination of intermediate filters could be found to make the connection.
- CASE(VFW_E_CANNOT_RENDER); // No combination of filters could be found to render the stream.
- CASE(VFW_E_NO_ACCEPTABLE_TYPES); // There is no common media type between these pins.
- CASE(VFW_E_NOT_IN_GRAPH);
-
- default:
- return "Unknown Code";
- };
-#undef CASE
-}
-
-HRESULT
-CreateAndAddFilter(IGraphBuilder* aGraph,
- REFGUID aFilterClsId,
- LPCWSTR aFilterName,
- IBaseFilter **aOutFilter)
-{
- NS_ENSURE_TRUE(aGraph, E_POINTER);
- NS_ENSURE_TRUE(aOutFilter, E_POINTER);
- HRESULT hr;
-
- RefPtr<IBaseFilter> filter;
- hr = CoCreateInstance(aFilterClsId,
- nullptr,
- CLSCTX_INPROC_SERVER,
- IID_IBaseFilter,
- getter_AddRefs(filter));
- if (FAILED(hr)) {
- // Object probably not available on this system.
- WARN("CoCreateInstance failed, hr=%x", hr);
- return hr;
- }
-
- hr = aGraph->AddFilter(filter, aFilterName);
- if (FAILED(hr)) {
- WARN("AddFilter failed, hr=%x", hr);
- return hr;
- }
-
- filter.forget(aOutFilter);
-
- return S_OK;
-}
-
-HRESULT
-CreateMP3DMOWrapperFilter(IBaseFilter **aOutFilter)
-{
- NS_ENSURE_TRUE(aOutFilter, E_POINTER);
- HRESULT hr;
-
- // Create the wrapper filter.
- RefPtr<IBaseFilter> filter;
- hr = CoCreateInstance(CLSID_DMOWrapperFilter,
- nullptr,
- CLSCTX_INPROC_SERVER,
- IID_IBaseFilter,
- getter_AddRefs(filter));
- if (FAILED(hr)) {
- WARN("CoCreateInstance failed, hr=%x", hr);
- return hr;
- }
-
- // Query for IDMOWrapperFilter.
- RefPtr<IDMOWrapperFilter> dmoWrapper;
- hr = filter->QueryInterface(IID_IDMOWrapperFilter,
- getter_AddRefs(dmoWrapper));
- if (FAILED(hr)) {
- WARN("QueryInterface failed, hr=%x", hr);
- return hr;
- }
-
- hr = dmoWrapper->Init(CLSID_CMP3DecMediaObject, DMOCATEGORY_AUDIO_DECODER);
- if (FAILED(hr)) {
- // Can't instantiate the MP3 DMO. It doesn't exist on Windows XP, so we're
- // probably hitting that. Don't log a warning to the console; this is an
- // expected error.
- WARN("dmoWrapper Init failed, hr=%x", hr);
- return hr;
- }
-
- filter.forget(aOutFilter);
-
- return S_OK;
-}
-
-HRESULT
-AddMP3DMOWrapperFilter(IGraphBuilder* aGraph,
- IBaseFilter **aOutFilter)
-{
- NS_ENSURE_TRUE(aGraph, E_POINTER);
- NS_ENSURE_TRUE(aOutFilter, E_POINTER);
- HRESULT hr;
-
- // Create the wrapper filter.
- RefPtr<IBaseFilter> filter;
- hr = CreateMP3DMOWrapperFilter(getter_AddRefs(filter));
- NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
-
- // Add the wrapper filter to graph.
- hr = aGraph->AddFilter(filter, L"MP3 Decoder DMO");
- if (FAILED(hr)) {
- WARN("AddFilter failed, hr=%x", hr);
- return hr;
- }
-
- filter.forget(aOutFilter);
-
- return S_OK;
-}
-
-bool
-CanDecodeMP3UsingDirectShow()
-{
- RefPtr<IBaseFilter> filter;
-
- // Can we create the MP3 demuxer filter?
- if (FAILED(CoCreateInstance(CLSID_MPEG1Splitter,
- nullptr,
- CLSCTX_INPROC_SERVER,
- IID_IBaseFilter,
- getter_AddRefs(filter)))) {
- return false;
- }
-
- // Can we create either the WinXP MP3 decoder filter or the MP3 DMO decoder?
- if (FAILED(CoCreateInstance(DirectShowReader::CLSID_MPEG_LAYER_3_DECODER_FILTER,
- nullptr,
- CLSCTX_INPROC_SERVER,
- IID_IBaseFilter,
- getter_AddRefs(filter))) &&
- FAILED(CreateMP3DMOWrapperFilter(getter_AddRefs(filter)))) {
- return false;
- }
-
- // Otherwise, we can create all of the components we need. Assume
- // DirectShow is going to work...
- return true;
-}
-
-// Match a pin by pin direction and connection state.
-HRESULT
-MatchUnconnectedPin(IPin* aPin,
- PIN_DIRECTION aPinDir,
- bool *aOutMatches)
-{
- NS_ENSURE_TRUE(aPin, E_POINTER);
- NS_ENSURE_TRUE(aOutMatches, E_POINTER);
-
- // Ensure the pin is unconnected.
- RefPtr<IPin> peer;
- HRESULT hr = aPin->ConnectedTo(getter_AddRefs(peer));
- if (hr != VFW_E_NOT_CONNECTED) {
- *aOutMatches = false;
- return hr;
- }
-
- // Ensure the pin is of the specified direction.
- PIN_DIRECTION pinDir;
- hr = aPin->QueryDirection(&pinDir);
- NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
-
- *aOutMatches = (pinDir == aPinDir);
- return S_OK;
-}
-
-// Return the first unconnected input pin or output pin.
-already_AddRefed<IPin>
-GetUnconnectedPin(IBaseFilter* aFilter, PIN_DIRECTION aPinDir)
-{
- RefPtr<IEnumPins> enumPins;
-
- HRESULT hr = aFilter->EnumPins(getter_AddRefs(enumPins));
- NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
-
- // Test each pin to see if it matches the direction we're looking for.
- RefPtr<IPin> pin;
- while (S_OK == enumPins->Next(1, getter_AddRefs(pin), nullptr)) {
- bool matches = false;
- if (SUCCEEDED(MatchUnconnectedPin(pin, aPinDir, &matches)) &&
- matches) {
- return pin.forget();
- }
- }
-
- return nullptr;
-}
-
-HRESULT
-ConnectFilters(IGraphBuilder* aGraph,
- IBaseFilter* aOutputFilter,
- IBaseFilter* aInputFilter)
-{
- RefPtr<IPin> output = GetUnconnectedPin(aOutputFilter, PINDIR_OUTPUT);
- NS_ENSURE_TRUE(output, E_FAIL);
-
- RefPtr<IPin> input = GetUnconnectedPin(aInputFilter, PINDIR_INPUT);
- NS_ENSURE_TRUE(input, E_FAIL);
-
- return aGraph->Connect(output, input);
-}
-
-} // namespace mozilla
-
-// avoid redefined macro in unified build
-#undef WARN
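
For context on the helpers deleted above: DirectShowReader composed CreateAndAddFilter(), AddMP3DMOWrapperFilter() and ConnectFilters() roughly as in the following sketch. This is a hypothetical illustration rather than code from the tree; BuildMP3Graph and its error handling are assumptions, while the helper signatures and CLSID_MPEG1Splitter come from the file above.

// Hypothetical sketch of how the removed helpers fit together (assumed name).
HRESULT BuildMP3Graph(IGraphBuilder* aGraph)
{
  // Demuxer: the MPEG-1 splitter that ships with Windows.
  RefPtr<IBaseFilter> demuxer;
  HRESULT hr = CreateAndAddFilter(aGraph,
                                  CLSID_MPEG1Splitter,
                                  L"MPEG1 Splitter",
                                  getter_AddRefs(demuxer));
  if (FAILED(hr)) {
    return hr;
  }

  // Decoder: wrap the MP3 DMO so it can live in a DirectShow graph.
  RefPtr<IBaseFilter> decoder;
  hr = AddMP3DMOWrapperFilter(aGraph, getter_AddRefs(decoder));
  if (FAILED(hr)) {
    return hr;
  }

  // Connect the demuxer's first unconnected output pin to the decoder's
  // first unconnected input pin.
  return ConnectFilters(aGraph, demuxer, decoder);
}
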
diff --git a/dom/media/directshow/DirectShowUtils.h b/dom/media/directshow/DirectShowUtils.h
deleted file mode 100644
index 3bbc122fc..000000000
--- a/dom/media/directshow/DirectShowUtils.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#ifndef _DirectShowUtils_h_
-#define _DirectShowUtils_h_
-
-#include <stdint.h>
-#include "dshow.h"
-
-// XXXbz windowsx.h defines GetFirstChild, GetNextSibling, and
-// GetPrevSibling as macros, apparently... Eeevil. We have functions
-// with those names on some classes, so undef the macros.
-#undef GetFirstChild
-#undef GetNextSibling
-#undef GetPrevSibling
-
-#include "DShowTools.h"
-#include "mozilla/Logging.h"
-
-namespace mozilla {
-
-// Win32 "Event" wrapper. Must be paired with a CriticalSection to create a
-// Java-style "monitor".
-class Signal {
-public:
-
- Signal(CriticalSection* aLock)
- : mLock(aLock)
- {
- CriticalSectionAutoEnter lock(*mLock);
- mEvent = CreateEvent(nullptr, FALSE, FALSE, nullptr);
- }
-
- ~Signal() {
- CriticalSectionAutoEnter lock(*mLock);
- CloseHandle(mEvent);
- }
-
- // Lock must be held.
- void Notify() {
- SetEvent(mEvent);
- }
-
- // Lock must be held. Check the wait condition before waiting!
- HRESULT Wait() {
- mLock->Leave();
- DWORD result = WaitForSingleObject(mEvent, INFINITE);
- mLock->Enter();
- return result == WAIT_OBJECT_0 ? S_OK : E_FAIL;
- }
-
-private:
- CriticalSection* mLock;
- HANDLE mEvent;
-};
-
-HRESULT
-AddGraphToRunningObjectTable(IUnknown *aUnkGraph, DWORD *aOutRotRegister);
-
-void
-RemoveGraphFromRunningObjectTable(DWORD aRotRegister);
-
-const char*
-GetGraphNotifyString(long evCode);
-
-// Creates a filter and adds it to a graph.
-HRESULT
-CreateAndAddFilter(IGraphBuilder* aGraph,
- REFGUID aFilterClsId,
- LPCWSTR aFilterName,
- IBaseFilter **aOutFilter);
-
-HRESULT
-AddMP3DMOWrapperFilter(IGraphBuilder* aGraph,
- IBaseFilter **aOutFilter);
-
-// Connects the output pin on aOutputFilter to an input pin on
-// aInputFilter, in aGraph.
-HRESULT
-ConnectFilters(IGraphBuilder* aGraph,
- IBaseFilter* aOutputFilter,
- IBaseFilter* aInputFilter);
-
-HRESULT
-MatchUnconnectedPin(IPin* aPin,
- PIN_DIRECTION aPinDir,
- bool *aOutMatches);
-
-// Converts from microseconds to DirectShow "Reference Time"
-// (hundreds of nanoseconds).
-inline int64_t
-UsecsToRefTime(const int64_t aUsecs)
-{
- return aUsecs * 10;
-}
-
-// Converts from DirectShow "Reference Time" (hundreds of nanoseconds)
-// to microseconds.
-inline int64_t
-RefTimeToUsecs(const int64_t hRefTime)
-{
- return hRefTime / 10;
-}
-
-// Converts from DirectShow "Reference Time" (hundreds of nanoseconds)
-// to seconds.
-inline double
-RefTimeToSeconds(const REFERENCE_TIME aRefTime)
-{
- return double(aRefTime) / 10000000;
-}
-
-const char*
-GetDirectShowGuidName(const GUID& aGuid);
-
-// Returns true if we can instantiate the MP3 demuxer and decoder filters.
-// Use this to detect whether MP3 support is installed.
-bool
-CanDecodeMP3UsingDirectShow();
-
-} // namespace mozilla
-
-#endif
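
For reference, the conversion helpers above treat DirectShow reference time as 100-nanosecond ticks, so one microsecond is ten ticks and one second is 10,000,000 ticks. A minimal round-trip sketch (not part of the patch):

// Round-trip sanity check for the conversion helpers (illustrative only).
const int64_t usecs = 1000000;                 // one second, in microseconds
const int64_t ticks = UsecsToRefTime(usecs);   // 10,000,000 ticks of 100 ns
const int64_t back  = RefTimeToUsecs(ticks);   // 1,000,000 again
const double  secs  = RefTimeToSeconds(ticks); // 1.0
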
diff --git a/dom/media/directshow/SampleSink.cpp b/dom/media/directshow/SampleSink.cpp
deleted file mode 100644
index fa5dc8d19..000000000
--- a/dom/media/directshow/SampleSink.cpp
+++ /dev/null
@@ -1,159 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include "SampleSink.h"
-#include "AudioSinkFilter.h"
-#include "AudioSinkInputPin.h"
-#include "VideoUtils.h"
-#include "mozilla/Logging.h"
-
-using namespace mozilla::media;
-
-namespace mozilla {
-
-static LazyLogModule gDirectShowLog("DirectShowDecoder");
-#define LOG(...) MOZ_LOG(gDirectShowLog, mozilla::LogLevel::Debug, (__VA_ARGS__))
-
-SampleSink::SampleSink()
- : mMonitor("SampleSink"),
- mIsFlushing(false),
- mAtEOS(false)
-{
- MOZ_COUNT_CTOR(SampleSink);
-}
-
-SampleSink::~SampleSink()
-{
- MOZ_COUNT_DTOR(SampleSink);
-}
-
-void
-SampleSink::SetAudioFormat(const WAVEFORMATEX* aInFormat)
-{
- NS_ENSURE_TRUE(aInFormat, );
- ReentrantMonitorAutoEnter mon(mMonitor);
- memcpy(&mAudioFormat, aInFormat, sizeof(WAVEFORMATEX));
-}
-
-void
-SampleSink::GetAudioFormat(WAVEFORMATEX* aOutFormat)
-{
- MOZ_ASSERT(aOutFormat);
- ReentrantMonitorAutoEnter mon(mMonitor);
- memcpy(aOutFormat, &mAudioFormat, sizeof(WAVEFORMATEX));
-}
-
-HRESULT
-SampleSink::Receive(IMediaSample* aSample)
-{
- ReentrantMonitorAutoEnter mon(mMonitor);
-
- while (true) {
- if (mIsFlushing) {
- return S_FALSE;
- }
- if (!mSample) {
- break;
- }
- if (mAtEOS) {
- return E_UNEXPECTED;
- }
- // Wait until the consumer thread consumes the sample.
- mon.Wait();
- }
-
- if (MOZ_LOG_TEST(gDirectShowLog, LogLevel::Debug)) {
- REFERENCE_TIME start = 0, end = 0;
- HRESULT hr = aSample->GetMediaTime(&start, &end);
- LOG("SampleSink::Receive() [%4.2lf-%4.2lf]",
- (double)RefTimeToUsecs(start) / USECS_PER_S,
- (double)RefTimeToUsecs(end) / USECS_PER_S);
- }
-
- mSample = aSample;
- // Notify the monitor to wake the consumer thread blocked in Extract()
- // if necessary.
- mon.NotifyAll();
- return S_OK;
-}
-
-HRESULT
-SampleSink::Extract(RefPtr<IMediaSample>& aOutSample)
-{
- ReentrantMonitorAutoEnter mon(mMonitor);
- // Loop until we have a sample, or we should abort.
- while (true) {
- if (mIsFlushing) {
- return S_FALSE;
- }
- if (mSample) {
- break;
- }
- if (mAtEOS) {
- // Order is important here: if we have a sample, we should return it
- // before reporting EOS.
- return E_UNEXPECTED;
- }
- // Wait until the producer thread gives us a sample.
- mon.Wait();
- }
- aOutSample = mSample;
-
- if (MOZ_LOG_TEST(gDirectShowLog, LogLevel::Debug)) {
- int64_t start = 0, end = 0;
- mSample->GetMediaTime(&start, &end);
- LOG("SampleSink::Extract() [%4.2lf-%4.2lf]",
- (double)RefTimeToUsecs(start) / USECS_PER_S,
- (double)RefTimeToUsecs(end) / USECS_PER_S);
- }
-
- mSample = nullptr;
- // Notify the monitor to wake the producer thread blocked in Receive()
- // if necessary.
- mon.NotifyAll();
- return S_OK;
-}
-
-void
-SampleSink::Flush()
-{
- LOG("SampleSink::Flush()");
- ReentrantMonitorAutoEnter mon(mMonitor);
- mIsFlushing = true;
- mSample = nullptr;
- mon.NotifyAll();
-}
-
-void
-SampleSink::Reset()
-{
- LOG("SampleSink::Reset()");
- ReentrantMonitorAutoEnter mon(mMonitor);
- mIsFlushing = false;
- mAtEOS = false;
-}
-
-void
-SampleSink::SetEOS()
-{
- LOG("SampleSink::SetEOS()");
- ReentrantMonitorAutoEnter mon(mMonitor);
- mAtEOS = true;
- // Notify to unblock any threads waiting for samples in
- // Extract() or Receive(). Now that we're at EOS, no more samples
- // will come!
- mon.NotifyAll();
-}
-
-bool
-SampleSink::AtEOS()
-{
- ReentrantMonitorAutoEnter mon(mMonitor);
- return mAtEOS && !mSample;
-}
-
-} // namespace mozilla
-
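
The Receive()/Extract() pair above implements a one-slot producer/consumer handoff on a reentrant monitor: the DirectShow delivery thread parks a sample in mSample, the decode thread takes it out, and flushing or EOS wakes both sides. A portable restatement of that protocol in standard C++ is sketched below; the class and member names are assumptions for illustration, not Mozilla APIs.

// Illustrative one-slot handoff mirroring SampleSink's protocol (assumed names).
#include <condition_variable>
#include <mutex>
#include <optional>

class OneSlotHandoff {
public:
  // Producer side: blocks while the slot is full; fails once flushing/EOS.
  bool Put(int aSample) {
    std::unique_lock<std::mutex> lock(mMutex);
    mCond.wait(lock, [this] { return !mSlot || mFlushing || mAtEOS; });
    if (mFlushing || mAtEOS) {
      return false;
    }
    mSlot = aSample;
    mCond.notify_all();  // Wake a consumer blocked in Take().
    return true;
  }

  // Consumer side: blocks until a sample arrives or we flush / hit EOS.
  // A sample already in the slot is still returned after EOS is signalled.
  std::optional<int> Take() {
    std::unique_lock<std::mutex> lock(mMutex);
    mCond.wait(lock, [this] { return mSlot || mFlushing || mAtEOS; });
    if (!mSlot) {
      return std::nullopt;  // Flushing, or EOS with nothing buffered.
    }
    std::optional<int> sample = mSlot;
    mSlot.reset();
    mCond.notify_all();  // Wake a producer blocked in Put().
    return sample;
  }

  void Flush() {
    std::lock_guard<std::mutex> lock(mMutex);
    mFlushing = true;
    mSlot.reset();
    mCond.notify_all();
  }

  void SetEOS() {
    std::lock_guard<std::mutex> lock(mMutex);
    mAtEOS = true;
    mCond.notify_all();
  }

private:
  std::mutex mMutex;
  std::condition_variable mCond;
  std::optional<int> mSlot;
  bool mFlushing = false;
  bool mAtEOS = false;
};
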
diff --git a/dom/media/directshow/SampleSink.h b/dom/media/directshow/SampleSink.h
deleted file mode 100644
index 6a1af9fee..000000000
--- a/dom/media/directshow/SampleSink.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#if !defined(SampleSink_h_)
-#define SampleSink_h_
-
-#include "BaseFilter.h"
-#include "DirectShowUtils.h"
-#include "mozilla/RefPtr.h"
-#include "mozilla/ReentrantMonitor.h"
-
-namespace mozilla {
-
-class SampleSink {
-public:
- SampleSink();
- virtual ~SampleSink();
-
- // Sets the audio format of the incoming samples. The upstream filter
- // calls this. This makes a copy.
- void SetAudioFormat(const WAVEFORMATEX* aInFormat);
-
- // Copies the format of incoming audio samples into *aOutFormat.
- void GetAudioFormat(WAVEFORMATEX* aOutFormat);
-
- // Called when a sample is delivered by the DirectShow graph to the sink.
- // The decode thread retrieves the sample by calling Extract().
- // Blocks if there's already a sample waiting to be consumed by the decode
- // thread.
- HRESULT Receive(IMediaSample* aSample);
-
- // Retrieves a sample from the sample queue, blocking until one becomes
- // available or an error occurs. Returns S_FALSE when flushing, and
- // E_UNEXPECTED once the end of stream has been reached.
- HRESULT Extract(RefPtr<IMediaSample>& aOutSample);
-
- // Unblocks any threads waiting in Receive() or Extract().
- // Clears mSample, which unblocks the upstream filter.
- void Flush();
-
- // Reopens the sink so that Receive() accepts samples again.
- // Clears the flushing and EOS flags.
- void Reset();
-
- // Marks that we've reached the end of the stream.
- void SetEOS();
-
- // Returns whether we're at end of stream.
- bool AtEOS();
-
-private:
- // All data in this class is synchronized by mMonitor.
- ReentrantMonitor mMonitor;
- RefPtr<IMediaSample> mSample;
-
- // Format of the audio stream we're receiving.
- WAVEFORMATEX mAudioFormat;
-
- bool mIsFlushing;
- bool mAtEOS;
-};
-
-} // namespace mozilla
-
-#endif // SampleSink_h_
diff --git a/dom/media/directshow/SourceFilter.cpp b/dom/media/directshow/SourceFilter.cpp
deleted file mode 100644
index 4c5a0882c..000000000
--- a/dom/media/directshow/SourceFilter.cpp
+++ /dev/null
@@ -1,683 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this file,
- * You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include "SourceFilter.h"
-#include "MediaResource.h"
-#include "mozilla/RefPtr.h"
-#include "DirectShowUtils.h"
-#include "MP3FrameParser.h"
-#include "mozilla/Logging.h"
-#include <algorithm>
-
-using namespace mozilla::media;
-
-namespace mozilla {
-
-// Define to trace what's on...
-//#define DEBUG_SOURCE_TRACE 1
-
-#if defined (DEBUG_SOURCE_TRACE)
-static LazyLogModule gDirectShowLog("DirectShowDecoder");
-#define DIRECTSHOW_LOG(...) MOZ_LOG(gDirectShowLog, mozilla::LogLevel::Debug, (__VA_ARGS__))
-#else
-#define DIRECTSHOW_LOG(...)
-#endif
-
-static HRESULT
-DoGetInterface(IUnknown* aUnknown, void** aInterface)
-{
- if (!aInterface)
- return E_POINTER;
- *aInterface = aUnknown;
- aUnknown->AddRef();
- return S_OK;
-}
-
-// Stores details of IAsyncReader::Request().
-class ReadRequest {
-public:
-
- ReadRequest(IMediaSample* aSample,
- DWORD_PTR aDwUser,
- uint32_t aOffset,
- uint32_t aCount)
- : mSample(aSample),
- mDwUser(aDwUser),
- mOffset(aOffset),
- mCount(aCount)
- {
- MOZ_COUNT_CTOR(ReadRequest);
- }
-
- ~ReadRequest() {
- MOZ_COUNT_DTOR(ReadRequest);
- }
-
- RefPtr<IMediaSample> mSample;
- DWORD_PTR mDwUser;
- uint32_t mOffset;
- uint32_t mCount;
-};
-
-// A wrapper around a media resource that presents only a partition of the
-// underlying resource to the caller. The partition runs from an offset to
-// the end of the stream, and this object ensures that offsets and lengths
-// are translated from the reduced partition exposed to the caller to the
-// absolute offsets of the underlying stream.
-class MediaResourcePartition {
-public:
- MediaResourcePartition(MediaResource* aResource,
- int64_t aDataStart)
- : mResource(aResource),
- mDataOffset(aDataStart)
- {}
-
- int64_t GetLength() {
- int64_t len = mResource.GetLength();
- if (len == -1) {
- return len;
- }
- return std::max<int64_t>(0, len - mDataOffset);
- }
- nsresult ReadAt(int64_t aOffset, char* aBuffer,
- uint32_t aCount, uint32_t* aBytes)
- {
- return mResource.ReadAt(aOffset + mDataOffset,
- aBuffer,
- aCount,
- aBytes);
- }
- int64_t GetCachedDataEnd() {
- int64_t tell = mResource.GetResource()->Tell();
- int64_t dataEnd =
- mResource.GetResource()->GetCachedDataEnd(tell) - mDataOffset;
- return dataEnd;
- }
-private:
- // MediaResource from which we read data.
- MediaResourceIndex mResource;
- int64_t mDataOffset;
-};
-
-
-// Output pin for SourceFilter, which implements IAsyncReader, to
-// allow downstream filters to pull/read data from it. Downstream pins
-// register to read data using Request(), and asynchronously wait for the
-// reads to complete using WaitForNext(). They may also synchronously read
-// using SyncRead(). This class is a delegate (tear off) of
-// SourceFilter.
-//
-// We can expose only a segment of the MediaResource to the filter graph.
-// This is used to strip off the ID3v2 tags from the stream, as DirectShow
-// has trouble parsing some headers.
-//
-// Implements:
-// * IAsyncReader
-// * IPin
-// * IQualityControl
-// * IUnknown
-//
-class DECLSPEC_UUID("18e5cfb2-1015-440c-a65c-e63853235894")
-OutputPin : public IAsyncReader,
- public BasePin
-{
-public:
-
- OutputPin(MediaResource* aMediaResource,
- SourceFilter* aParent,
- CriticalSection& aFilterLock,
- int64_t aMP3DataStart);
- virtual ~OutputPin();
-
- // IUnknown
- // Defer ref counting to BasePin, which defers to the owning BaseFilter.
- STDMETHODIMP_(ULONG) AddRef() override { return BasePin::AddRef(); }
- STDMETHODIMP_(ULONG) Release() override { return BasePin::Release(); }
- STDMETHODIMP QueryInterface(REFIID iid, void** ppv) override;
-
- // BasePin Overrides.
- // Determines if the pin accepts a specific media type.
- HRESULT CheckMediaType(const MediaType* aMediaType) override;
-
- // Retrieves a preferred media type, by index value.
- HRESULT GetMediaType(int aPosition, MediaType* aMediaType) override;
-
- // Releases the pin from a connection.
- HRESULT BreakConnect(void) override;
-
- // Determines whether a pin connection is suitable.
- HRESULT CheckConnect(IPin* aPin) override;
-
-
- // IAsyncReader overrides
-
- // The RequestAllocator method requests an allocator during the
- // pin connection.
- STDMETHODIMP RequestAllocator(IMemAllocator* aPreferred,
- ALLOCATOR_PROPERTIES* aProps,
- IMemAllocator** aActual) override;
-
- // The Request method queues an asynchronous request for data. Downstream
- // will call WaitForNext() when they want to retrieve the result.
- STDMETHODIMP Request(IMediaSample* aSample, DWORD_PTR aUserData) override;
-
- // The WaitForNext method waits for the next pending read request
- // to complete. This method fails if the graph is flushing.
- // Defers to SyncRead().
- STDMETHODIMP WaitForNext(DWORD aTimeout,
- IMediaSample** aSamples,
- DWORD_PTR* aUserData) override;
-
- // The SyncReadAligned method performs a synchronous read. The method
- // blocks until the request is completed. Defers to SyncRead(). This
- // method does not fail if the graph is flushing.
- STDMETHODIMP SyncReadAligned(IMediaSample* aSample) override;
-
- // The SyncRead method performs a synchronous read. The method blocks
- // until the request is completed. This method does not fail if the
- // graph is flushing.
- STDMETHODIMP SyncRead(LONGLONG aPosition, LONG aLength, BYTE* aBuffer) override;
-
- // The Length method retrieves the total length of the stream.
- STDMETHODIMP Length(LONGLONG* aTotal, LONGLONG* aAvailable) override;
-
- // IPin Overrides
- STDMETHODIMP BeginFlush(void) override;
- STDMETHODIMP EndFlush(void) override;
-
- uint32_t GetAndResetBytesConsumedCount();
-
-private:
-
- // Protects thread-shared data/structures (mFlushCount, mPendingReads).
- // WaitForNext() also waits on this monitor.
- CriticalSection& mPinLock;
-
- // Signal used with mPinLock to implement WaitForNext().
- Signal mSignal;
-
- // The filter that owns us. Weak reference, as we're a delegate (tear off).
- SourceFilter* mParentSource;
-
- MediaResourcePartition mResource;
-
- // Counter, inc'd in BeginFlush(), dec'd in EndFlush(). Calls to this can
- // come from multiple threads and can interleave, hence the counter.
- int32_t mFlushCount;
-
- // Number of bytes that have been read from the output pin since the last
- // time GetAndResetBytesConsumedCount() was called.
- uint32_t mBytesConsumed;
-
- // Deque of ReadRequest* for reads that are yet to be serviced.
- // ReadRequests are stored on the heap; whoever pops them must delete them.
- nsDeque mPendingReads;
-
- // Flags if the downstream pin has QI'd for IAsyncReader. We refuse
- // connection if they don't query, as it means they're assuming that we're
- // a push filter, and we're not.
- bool mQueriedForAsyncReader;
-
-};
-
-// For mingw __uuidof support
-#ifdef __CRT_UUID_DECL
-}
-__CRT_UUID_DECL(mozilla::OutputPin, 0x18e5cfb2,0x1015,0x440c,0xa6,0x5c,0xe6,0x38,0x53,0x23,0x58,0x94);
-namespace mozilla {
-#endif
-
-OutputPin::OutputPin(MediaResource* aResource,
- SourceFilter* aParent,
- CriticalSection& aFilterLock,
- int64_t aMP3DataStart)
- : BasePin(static_cast<BaseFilter*>(aParent),
- &aFilterLock,
- L"MozillaOutputPin",
- PINDIR_OUTPUT),
- mPinLock(aFilterLock),
- mSignal(&mPinLock),
- mParentSource(aParent),
- mResource(aResource, aMP3DataStart),
- mFlushCount(0),
- mBytesConsumed(0),
- mQueriedForAsyncReader(false)
-{
- MOZ_COUNT_CTOR(OutputPin);
- DIRECTSHOW_LOG("OutputPin::OutputPin()");
-}
-
-OutputPin::~OutputPin()
-{
- MOZ_COUNT_DTOR(OutputPin);
- DIRECTSHOW_LOG("OutputPin::~OutputPin()");
-}
-
-HRESULT
-OutputPin::BreakConnect()
-{
- mQueriedForAsyncReader = false;
- return BasePin::BreakConnect();
-}
-
-STDMETHODIMP
-OutputPin::QueryInterface(REFIID aIId, void** aInterface)
-{
- if (aIId == IID_IAsyncReader) {
- mQueriedForAsyncReader = true;
- return DoGetInterface(static_cast<IAsyncReader*>(this), aInterface);
- }
-
- if (aIId == __uuidof(OutputPin)) {
- AddRef();
- *aInterface = this;
- return S_OK;
- }
-
- return BasePin::QueryInterface(aIId, aInterface);
-}
-
-HRESULT
-OutputPin::CheckConnect(IPin* aPin)
-{
- // Our connection is only suitable if the downstream pin knows
- // that we're asynchronous (i.e. it queried for IAsyncReader).
- return mQueriedForAsyncReader ? S_OK : S_FALSE;
-}
-
-HRESULT
-OutputPin::CheckMediaType(const MediaType* aMediaType)
-{
- const MediaType *myMediaType = mParentSource->GetMediaType();
-
- if (IsEqualGUID(aMediaType->majortype, myMediaType->majortype) &&
- IsEqualGUID(aMediaType->subtype, myMediaType->subtype) &&
- IsEqualGUID(aMediaType->formattype, myMediaType->formattype))
- {
- DIRECTSHOW_LOG("OutputPin::CheckMediaType() Match: major=%s minor=%s TC=%d FSS=%d SS=%u",
- GetDirectShowGuidName(aMediaType->majortype),
- GetDirectShowGuidName(aMediaType->subtype),
- aMediaType->TemporalCompression(),
- aMediaType->bFixedSizeSamples,
- aMediaType->SampleSize());
- return S_OK;
- }
-
- DIRECTSHOW_LOG("OutputPin::CheckMediaType() Failed to match: major=%s minor=%s TC=%d FSS=%d SS=%u",
- GetDirectShowGuidName(aMediaType->majortype),
- GetDirectShowGuidName(aMediaType->subtype),
- aMediaType->TemporalCompression(),
- aMediaType->bFixedSizeSamples,
- aMediaType->SampleSize());
- return S_FALSE;
-}
-
-HRESULT
-OutputPin::GetMediaType(int aPosition, MediaType* aMediaType)
-{
- if (!aMediaType)
- return E_POINTER;
-
- if (aPosition == 0) {
- aMediaType->Assign(mParentSource->GetMediaType());
- return S_OK;
- }
- return VFW_S_NO_MORE_ITEMS;
-}
-
-// Note: despite its name, this returns true when |x| is *not* a power of
-// two (and false for zero and for exact powers of two). The caller below
-// relies on that to force a sane default alignment.
-static inline bool
-IsPowerOf2(int32_t x) {
- return ((-x & x) != x);
-}
-
-STDMETHODIMP
-OutputPin::RequestAllocator(IMemAllocator* aPreferred,
- ALLOCATOR_PROPERTIES* aProps,
- IMemAllocator** aActual)
-{
- // Require the downstream pin to suggest what they want...
- if (!aPreferred) return E_POINTER;
- if (!aProps) return E_POINTER;
- if (!aActual) return E_POINTER;
-
- // We only care about alignment - our allocator will reject anything
- // which isn't power-of-2 aligned, so try a 4-byte aligned allocator.
- ALLOCATOR_PROPERTIES props;
- memcpy(&props, aProps, sizeof(ALLOCATOR_PROPERTIES));
- if (aProps->cbAlign == 0 || IsPowerOf2(aProps->cbAlign)) {
- props.cbAlign = 4;
- }
-
- // Limit allocator's number of buffers. We know that the media will most
- // likely be bound by network speed, not by decoding speed. We also
- // store the incoming data in a Gecko stream; if we don't limit buffers
- // here we'll end up duplicating a lot of storage. We must, however, leave
- // enough space for audio key frames to fit in the first batch of buffers,
- // else pausing may fail for some downstream decoders.
- if (props.cBuffers > BaseFilter::sMaxNumBuffers) {
- props.cBuffers = BaseFilter::sMaxNumBuffers;
- }
-
- // The allocator properties that are actually used. We don't store
- // this; we only need it for SetProperties() below to succeed.
- ALLOCATOR_PROPERTIES actualProps;
- HRESULT hr;
-
- if (aPreferred) {
- // Play nice and prefer the downstream pin's preferred allocator.
- hr = aPreferred->SetProperties(&props, &actualProps);
- if (SUCCEEDED(hr)) {
- aPreferred->AddRef();
- *aActual = aPreferred;
- return S_OK;
- }
- }
-
- // Else the downstream pin's allocator rejected our properties, so create our own...
-
- // Just create a default allocator. It's highly unlikely that we'll use
- // this anyway, as most parsers insist on using their own allocators.
- RefPtr<IMemAllocator> allocator;
- hr = CoCreateInstance(CLSID_MemoryAllocator,
- 0,
- CLSCTX_INPROC_SERVER,
- IID_IMemAllocator,
- getter_AddRefs(allocator));
- if(FAILED(hr) || (allocator == nullptr)) {
- NS_WARNING("Can't create our own DirectShow allocator.");
- return hr;
- }
-
- // See if we can make it suitable
- hr = allocator->SetProperties(&props, &actualProps);
- if (SUCCEEDED(hr)) {
- // forget() transfers our reference on |allocator| to the caller, so the
- // net change in refcount is zero.
- allocator.forget(aActual);
- return S_OK;
- }
-
- NS_WARNING("Failed to pick an allocator");
- return hr;
-}
-
-STDMETHODIMP
-OutputPin::Request(IMediaSample* aSample, DWORD_PTR aDwUser)
-{
- if (!aSample) return E_FAIL;
-
- CriticalSectionAutoEnter lock(*mLock);
- NS_ASSERTION(!mFlushCount, "Request() while flushing");
-
- if (mFlushCount)
- return VFW_E_WRONG_STATE;
-
- REFERENCE_TIME refStart = 0, refEnd = 0;
- if (FAILED(aSample->GetTime(&refStart, &refEnd))) {
- NS_WARNING("Sample incorrectly timestamped");
- return VFW_E_SAMPLE_TIME_NOT_SET;
- }
-
- // Convert reference time to bytes.
- uint32_t start = (uint32_t)(refStart / 10000000);
- uint32_t end = (uint32_t)(refEnd / 10000000);
-
- uint32_t numBytes = end - start;
-
- ReadRequest* request = new ReadRequest(aSample,
- aDwUser,
- start,
- numBytes);
- // Memory for |request| is freed when it's popped off the pending reads
- // list in WaitForNext().
-
- // Push this onto the queue of reads to be serviced.
- mPendingReads.Push(request);
-
- // Notify any threads blocked in WaitForNext() which are waiting for mPendingReads
- // to become non-empty.
- mSignal.Notify();
-
- return S_OK;
-}
-
-STDMETHODIMP
-OutputPin::WaitForNext(DWORD aTimeout,
- IMediaSample** aOutSample,
- DWORD_PTR* aOutDwUser)
-{
- NS_ASSERTION(aTimeout == 0 || aTimeout == INFINITE,
- "Oops, we don't handle this!");
-
- *aOutSample = nullptr;
- *aOutDwUser = 0;
-
- LONGLONG offset = 0;
- LONG count = 0;
- BYTE* buf = nullptr;
-
- {
- CriticalSectionAutoEnter lock(*mLock);
-
- // Wait until there's a pending read to service.
- while (aTimeout && mPendingReads.GetSize() == 0 && !mFlushCount) {
- // Note: No need to guard against shutdown-during-wait here, as
- // typically the thread doing the pull will have already called
- // Request(), so we won't Wait() here anyway. SyncRead() will fail
- // on shutdown.
- mSignal.Wait();
- }
-
- nsAutoPtr<ReadRequest> request(reinterpret_cast<ReadRequest*>(mPendingReads.PopFront()));
- if (!request)
- return VFW_E_WRONG_STATE;
-
- *aOutSample = request->mSample;
- *aOutDwUser = request->mDwUser;
-
- offset = request->mOffset;
- count = request->mCount;
- buf = nullptr;
- request->mSample->GetPointer(&buf);
- NS_ASSERTION(buf != nullptr, "Invalid buffer!");
-
- if (mFlushCount) {
- return VFW_E_TIMEOUT;
- }
- }
-
- return SyncRead(offset, count, buf);
-}
-
-STDMETHODIMP
-OutputPin::SyncReadAligned(IMediaSample* aSample)
-{
- {
- // Ignore reads while flushing.
- CriticalSectionAutoEnter lock(*mLock);
- if (mFlushCount) {
- return S_FALSE;
- }
- }
-
- if (!aSample)
- return E_FAIL;
-
- REFERENCE_TIME lStart = 0, lEnd = 0;
- if (FAILED(aSample->GetTime(&lStart, &lEnd))) {
- NS_WARNING("Sample incorrectly timestamped");
- return VFW_E_SAMPLE_TIME_NOT_SET;
- }
-
- // Convert reference time to bytes.
- int32_t start = (int32_t)(lStart / 10000000);
- int32_t end = (int32_t)(lEnd / 10000000);
-
- int32_t numBytes = end - start;
-
- // If the range extends off the end of stream, truncate to the end of stream
- // as per the IAsyncReader specification.
- int64_t streamLength = mResource.GetLength();
- if (streamLength != -1) {
- // We know the exact length of the stream, fail if the requested offset
- // is beyond it.
- if (start > streamLength) {
- return VFW_E_BADALIGN;
- }
-
- // If the end of the chunk to read is off the end of the stream,
- // truncate it to the end of the stream.
- if ((start + numBytes) > streamLength) {
- numBytes = (uint32_t)(streamLength - start);
- }
- }
-
- BYTE* buf = nullptr;
- aSample->GetPointer(&buf);
-
- return SyncRead(start, numBytes, buf);
-}
-
-STDMETHODIMP
-OutputPin::SyncRead(LONGLONG aPosition,
- LONG aLength,
- BYTE* aBuffer)
-{
- MOZ_ASSERT(!NS_IsMainThread());
- NS_ENSURE_TRUE(aPosition >= 0, E_FAIL);
- NS_ENSURE_TRUE(aLength > 0, E_FAIL);
- NS_ENSURE_TRUE(aBuffer, E_POINTER);
-
- DIRECTSHOW_LOG("OutputPin::SyncRead(%lld, %d)", aPosition, aLength);
- {
- // Ignore reads while flushing.
- CriticalSectionAutoEnter lock(*mLock);
- if (mFlushCount) {
- return S_FALSE;
- }
- }
-
- uint32_t totalBytesRead = 0;
- nsresult rv = mResource.ReadAt(aPosition,
- reinterpret_cast<char*>(aBuffer),
- aLength,
- &totalBytesRead);
- if (NS_FAILED(rv)) {
- return E_FAIL;
- }
- if (totalBytesRead > 0) {
- CriticalSectionAutoEnter lock(*mLock);
- mBytesConsumed += totalBytesRead;
- }
- return (totalBytesRead == aLength) ? S_OK : S_FALSE;
-}
-
-STDMETHODIMP
-OutputPin::Length(LONGLONG* aTotal, LONGLONG* aAvailable)
-{
- HRESULT hr = S_OK;
- int64_t length = mResource.GetLength();
- if (length == -1) {
- hr = VFW_S_ESTIMATED;
- // Don't have a length. Just lie, it seems to work...
- *aTotal = INT32_MAX;
- } else {
- *aTotal = length;
- }
- if (aAvailable) {
- *aAvailable = mResource.GetCachedDataEnd();
- }
-
- DIRECTSHOW_LOG("OutputPin::Length() len=%lld avail=%lld", *aTotal, *aAvailable);
-
- return hr;
-}
-
-STDMETHODIMP
-OutputPin::BeginFlush()
-{
- CriticalSectionAutoEnter lock(*mLock);
- mFlushCount++;
- mSignal.Notify();
- return S_OK;
-}
-
-STDMETHODIMP
-OutputPin::EndFlush(void)
-{
- CriticalSectionAutoEnter lock(*mLock);
- mFlushCount--;
- return S_OK;
-}
-
-uint32_t
-OutputPin::GetAndResetBytesConsumedCount()
-{
- CriticalSectionAutoEnter lock(*mLock);
- uint32_t bytesConsumed = mBytesConsumed;
- mBytesConsumed = 0;
- return bytesConsumed;
-}
-
-SourceFilter::SourceFilter(const GUID& aMajorType,
- const GUID& aSubType)
- : BaseFilter(L"MozillaDirectShowSource", __uuidof(SourceFilter))
-{
- MOZ_COUNT_CTOR(SourceFilter);
- mMediaType.majortype = aMajorType;
- mMediaType.subtype = aSubType;
-
- DIRECTSHOW_LOG("SourceFilter Constructor(%s, %s)",
- GetDirectShowGuidName(aMajorType),
- GetDirectShowGuidName(aSubType));
-}
-
-SourceFilter::~SourceFilter()
-{
- MOZ_COUNT_DTOR(SourceFilter);
- DIRECTSHOW_LOG("SourceFilter Destructor()");
-}
-
-BasePin*
-SourceFilter::GetPin(int n)
-{
- if (n == 0) {
- NS_ASSERTION(mOutputPin != 0, "GetPin with no pin!");
- return static_cast<BasePin*>(mOutputPin);
- } else {
- return nullptr;
- }
-}
-
-// Gets the media type we're supplying.
-const MediaType*
-SourceFilter::GetMediaType() const
-{
- return &mMediaType;
-}
-
-nsresult
-SourceFilter::Init(MediaResource* aResource, int64_t aMP3Offset)
-{
- DIRECTSHOW_LOG("SourceFilter::Init()");
-
- mOutputPin = new OutputPin(aResource,
- this,
- mLock,
- aMP3Offset);
- NS_ENSURE_TRUE(mOutputPin != nullptr, NS_ERROR_FAILURE);
-
- return NS_OK;
-}
-
-uint32_t
-SourceFilter::GetAndResetBytesConsumedCount()
-{
- return mOutputPin->GetAndResetBytesConsumedCount();
-}
-
-
-} // namespace mozilla
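
One convention in the code above is easy to miss: IAsyncReader carries byte ranges in the sample's start/end time fields, scaled by 10,000,000, so Request() and SyncReadAligned() recover byte offsets by dividing the reference times by 10,000,000. A hypothetical pair of helpers (not in the tree) makes the convention explicit:

// Assumed helper names; the 10,000,000 scale factor is the IAsyncReader
// convention relied on by Request() and SyncReadAligned() above.
inline REFERENCE_TIME ByteOffsetToRefTime(int64_t aOffset)
{
  return aOffset * 10000000;
}

inline int64_t RefTimeToByteOffset(REFERENCE_TIME aRefTime)
{
  return aRefTime / 10000000;
}

// e.g. a downstream parser requesting bytes [4096, 8192) sets the sample's
// "times" to ByteOffsetToRefTime(4096) and ByteOffsetToRefTime(8192) before
// calling Request(); the pin turns them back into offsets with
// RefTimeToByteOffset().
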
diff --git a/dom/media/directshow/SourceFilter.h b/dom/media/directshow/SourceFilter.h
deleted file mode 100644
index d5ce2770e..000000000
--- a/dom/media/directshow/SourceFilter.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this file,
- * You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#if !defined(nsDirectShowSource_h___)
-#define nsDirectShowSource_h___
-
-#include "BaseFilter.h"
-#include "BasePin.h"
-#include "MediaType.h"
-
-#include "nsDeque.h"
-#include "nsAutoPtr.h"
-#include "DirectShowUtils.h"
-#include "mozilla/RefPtr.h"
-
-namespace mozilla {
-
-class MediaResource;
-class OutputPin;
-
-
-// SourceFilter is an asynchronous DirectShow source filter which
-// reads from a MediaResource, and supplies data downstream via a pull model
-// using OutputPin. It is used to supply a generic byte stream into
-// DirectShow.
-//
-// Implements:
-// * IBaseFilter
-// * IMediaFilter
-// * IPersist
-// * IUnknown
-//
-class DECLSPEC_UUID("5c2a7ad0-ba82-4659-9178-c4719a2765d6")
-SourceFilter : public media::BaseFilter
-{
-public:
-
- // Constructs source filter to deliver given media type.
- SourceFilter(const GUID& aMajorType, const GUID& aSubType);
- ~SourceFilter();
-
- nsresult Init(MediaResource *aResource, int64_t aMP3Offset);
-
- // BaseFilter overrides.
- // Only one output - the byte stream.
- int GetPinCount() override { return 1; }
-
- media::BasePin* GetPin(int n) override;
-
- // Gets the media type we're supplying.
- const media::MediaType* GetMediaType() const;
-
- uint32_t GetAndResetBytesConsumedCount();
-
-protected:
-
- // Our async pull output pin.
- nsAutoPtr<OutputPin> mOutputPin;
-
- // Type of byte stream we output.
- media::MediaType mMediaType;
-
-};
-
-} // namespace mozilla
-
-// For mingw __uuidof support
-#ifdef __CRT_UUID_DECL
-__CRT_UUID_DECL(mozilla::SourceFilter, 0x5c2a7ad0,0xba82,0x4659,0x91,0x78,0xc4,0x71,0x9a,0x27,0x65,0xd6);
-#endif
-
-#endif
diff --git a/dom/media/directshow/moz.build b/dom/media/directshow/moz.build
deleted file mode 100644
index 8a9b76200..000000000
--- a/dom/media/directshow/moz.build
+++ /dev/null
@@ -1,41 +0,0 @@
-# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
-# vim: set filetype=python:
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this
-# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-EXPORTS += [
- 'AudioSinkFilter.h',
- 'AudioSinkInputPin.h',
- 'DirectShowDecoder.h',
- 'DirectShowReader.h',
- 'DirectShowUtils.h',
-]
-
-UNIFIED_SOURCES += [
- 'DirectShowDecoder.cpp',
- 'DirectShowUtils.cpp',
- 'SourceFilter.cpp',
-]
-
-SOURCES += [
- 'AudioSinkFilter.cpp',
- 'AudioSinkInputPin.cpp',
- 'DirectShowReader.cpp',
- 'SampleSink.cpp',
-]
-
-# If WebRTC isn't being built, we need to compile the DirectShow base classes so that
-# they're available at link time.
-if not CONFIG['MOZ_WEBRTC']:
- SOURCES += [
- '/media/webrtc/trunk/webrtc/modules/video_capture/windows/BaseFilter.cpp',
- '/media/webrtc/trunk/webrtc/modules/video_capture/windows/BaseInputPin.cpp',
- '/media/webrtc/trunk/webrtc/modules/video_capture/windows/BasePin.cpp',
- '/media/webrtc/trunk/webrtc/modules/video_capture/windows/MediaType.cpp',
- ]
-
-FINAL_LIBRARY = 'xul'
-LOCAL_INCLUDES += [
- '/media/webrtc/trunk/webrtc/modules/video_capture/windows',
-]
diff --git a/dom/media/eme/DetailedPromise.cpp b/dom/media/eme/DetailedPromise.cpp
index 5893bea2e..d443e3336 100644
--- a/dom/media/eme/DetailedPromise.cpp
+++ b/dom/media/eme/DetailedPromise.cpp
@@ -96,9 +96,6 @@ DetailedPromise::MaybeReportTelemetry(Status aStatus)
uint32_t latency = (TimeStamp::Now() - mStartTime).ToMilliseconds();
EME_LOG("%s %s latency %ums reported via telemetry", mName.get(),
((aStatus == Succeeded) ? "succcess" : "failure"), latency);
- Telemetry::ID tid = (aStatus == Succeeded) ? mSuccessLatencyProbe.Value()
- : mFailureLatencyProbe.Value();
- Telemetry::Accumulate(tid, latency);
}
} // namespace dom
diff --git a/dom/media/eme/MediaKeySession.cpp b/dom/media/eme/MediaKeySession.cpp
index d5eff3f77..9c002b5ba 100644
--- a/dom/media/eme/MediaKeySession.cpp
+++ b/dom/media/eme/MediaKeySession.cpp
@@ -315,9 +315,6 @@ MediaKeySession::GenerateRequest(const nsAString& aInitDataType,
// Note: Remaining steps of generateRequest method continue in CDM.
- Telemetry::Accumulate(Telemetry::VIDEO_CDM_GENERATE_REQUEST_CALLED,
- ToCDMTypeTelemetryEnum(mKeySystem));
-
// Convert initData to base64 for easier logging.
// Note: CreateSession() Move()s the data out of the array, so we have
// to copy it here.
diff --git a/dom/media/eme/MediaKeys.cpp b/dom/media/eme/MediaKeys.cpp
index eedd675e4..fea548698 100644
--- a/dom/media/eme/MediaKeys.cpp
+++ b/dom/media/eme/MediaKeys.cpp
@@ -13,7 +13,6 @@
#include "mozilla/dom/MediaKeySession.h"
#include "mozilla/dom/DOMException.h"
#include "mozilla/dom/UnionTypes.h"
-#include "mozilla/Telemetry.h"
#include "GMPCDMProxy.h"
#ifdef MOZ_WIDGET_ANDROID
#include "mozilla/MediaDrmCDMProxy.h"
@@ -457,7 +456,6 @@ MediaKeys::OnCDMCreated(PromiseId aId, const nsACString& aNodeId, const uint32_t
mKeySystem,
MediaKeySystemStatus::Cdm_created);
- Telemetry::Accumulate(Telemetry::VIDEO_CDM_CREATED, ToCDMTypeTelemetryEnum(mKeySystem));
}
static bool
diff --git a/dom/media/fmp4/MP4Decoder.cpp b/dom/media/fmp4/MP4Decoder.cpp
index fdd6f2c7e..b293c251b 100644
--- a/dom/media/fmp4/MP4Decoder.cpp
+++ b/dom/media/fmp4/MP4Decoder.cpp
@@ -83,10 +83,6 @@ MP4Decoder::CanHandleMediaType(const MediaContentType& aType,
const bool isMP4Audio = aType.GetMIMEType().EqualsASCII("audio/mp4") ||
aType.GetMIMEType().EqualsASCII("audio/x-m4a");
const bool isMP4Video =
- // On B2G, treat 3GPP as MP4 when Gonk PDM is available.
-#ifdef MOZ_GONK_MEDIACODEC
- aType.GetMIMEType().EqualsASCII(VIDEO_3GPP) ||
-#endif
aType.GetMIMEType().EqualsASCII("video/mp4") ||
aType.GetMIMEType().EqualsASCII("video/quicktime") ||
aType.GetMIMEType().EqualsASCII("video/x-m4v");
@@ -139,6 +135,14 @@ MP4Decoder::CanHandleMediaType(const MediaContentType& aType,
NS_LITERAL_CSTRING("audio/flac"), aType));
continue;
}
+#ifdef MOZ_AV1
+ if (IsAV1CodecString(codec)) {
+ trackInfos.AppendElement(
+ CreateTrackInfoWithMIMETypeAndContentTypeExtraParameters(
+ NS_LITERAL_CSTRING("video/av1"), aType));
+ continue;
+ }
+#endif
// Note: Only accept H.264 in a video content type, not in an audio
// content type.
if (IsWhitelistedH264Codec(codec) && isMP4Video) {
@@ -168,7 +172,8 @@ bool
MP4Decoder::IsH264(const nsACString& aMimeType)
{
return aMimeType.EqualsLiteral("video/mp4") ||
- aMimeType.EqualsLiteral("video/avc");
+ aMimeType.EqualsLiteral("video/avc") ||
+ aMimeType.EqualsLiteral("video/webm; codecs=avc1");
}
/* static */
diff --git a/dom/media/fmp4/MP4Demuxer.cpp b/dom/media/fmp4/MP4Demuxer.cpp
index 646897468..ef68d5dca 100644
--- a/dom/media/fmp4/MP4Demuxer.cpp
+++ b/dom/media/fmp4/MP4Demuxer.cpp
@@ -16,9 +16,6 @@
#include "mp4_demuxer/Index.h"
#include "nsPrintfCString.h"
-// Used for telemetry
-#include "mozilla/Telemetry.h"
-#include "mp4_demuxer/AnnexB.h"
#include "mp4_demuxer/H264.h"
#include "nsAutoPtr.h"
@@ -72,47 +69,10 @@ private:
// Queued samples extracted by the demuxer, but not yet returned.
RefPtr<MediaRawData> mQueuedSample;
bool mNeedReIndex;
- bool mNeedSPSForTelemetry;
bool mIsH264 = false;
};
-// Returns true if no SPS was found and search for it should continue.
-bool
-AccumulateSPSTelemetry(const MediaByteBuffer* aExtradata)
-{
- mp4_demuxer::SPSData spsdata;
- if (mp4_demuxer::H264::DecodeSPSFromExtraData(aExtradata, spsdata)) {
- uint8_t constraints = (spsdata.constraint_set0_flag ? (1 << 0) : 0) |
- (spsdata.constraint_set1_flag ? (1 << 1) : 0) |
- (spsdata.constraint_set2_flag ? (1 << 2) : 0) |
- (spsdata.constraint_set3_flag ? (1 << 3) : 0) |
- (spsdata.constraint_set4_flag ? (1 << 4) : 0) |
- (spsdata.constraint_set5_flag ? (1 << 5) : 0);
- Telemetry::Accumulate(Telemetry::VIDEO_DECODED_H264_SPS_CONSTRAINT_SET_FLAG,
- constraints);
-
- // Collect profile_idc values up to 244, otherwise 0 for unknown.
- Telemetry::Accumulate(Telemetry::VIDEO_DECODED_H264_SPS_PROFILE,
- spsdata.profile_idc <= 244 ? spsdata.profile_idc : 0);
-
- // Make sure level_idc represents a value between levels 1 and 5.2,
- // otherwise collect 0 for unknown level.
- Telemetry::Accumulate(Telemetry::VIDEO_DECODED_H264_SPS_LEVEL,
- (spsdata.level_idc >= 10 && spsdata.level_idc <= 52) ?
- spsdata.level_idc : 0);
-
- // max_num_ref_frames should be between 0 and 16, anything larger will
- // be treated as invalid.
- Telemetry::Accumulate(Telemetry::VIDEO_H264_SPS_MAX_NUM_REF_FRAMES,
- std::min(spsdata.max_num_ref_frames, 17u));
-
- return false;
- }
-
- return true;
-}
-
MP4Demuxer::MP4Demuxer(MediaResource* aResource)
: mResource(aResource)
, mStream(new mp4_demuxer::ResourceStream(aResource))
@@ -243,25 +203,10 @@ MP4TrackDemuxer::MP4TrackDemuxer(MP4Demuxer* aParent,
EnsureUpToDateIndex(); // Force update of index
VideoInfo* videoInfo = mInfo->GetAsVideoInfo();
- // Collect telemetry from h264 AVCC SPS.
if (videoInfo &&
(mInfo->mMimeType.EqualsLiteral("video/mp4") ||
mInfo->mMimeType.EqualsLiteral("video/avc"))) {
mIsH264 = true;
- RefPtr<MediaByteBuffer> extraData = videoInfo->mExtraData;
- mNeedSPSForTelemetry = AccumulateSPSTelemetry(extraData);
- mp4_demuxer::SPSData spsdata;
- if (mp4_demuxer::H264::DecodeSPSFromExtraData(extraData, spsdata) &&
- spsdata.pic_width > 0 && spsdata.pic_height > 0 &&
- mp4_demuxer::H264::EnsureSPSIsSane(spsdata)) {
- videoInfo->mImage.width = spsdata.pic_width;
- videoInfo->mImage.height = spsdata.pic_height;
- videoInfo->mDisplay.width = spsdata.display_width;
- videoInfo->mDisplay.height = spsdata.display_height;
- }
- } else {
- // No SPS to be found.
- mNeedSPSForTelemetry = false;
}
}
@@ -388,15 +333,6 @@ MP4TrackDemuxer::GetSamples(int32_t aNumSamples)
if (samples->mSamples.IsEmpty()) {
return SamplesPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_END_OF_STREAM, __func__);
} else {
- for (const auto& sample : samples->mSamples) {
- // Collect telemetry from h264 Annex B SPS.
- if (mNeedSPSForTelemetry && mp4_demuxer::AnnexB::HasSPS(sample)) {
- RefPtr<MediaByteBuffer> extradata =
- mp4_demuxer::AnnexB::ExtractExtraData(sample);
- mNeedSPSForTelemetry = AccumulateSPSTelemetry(extradata);
- }
- }
-
if (mNextKeyframeTime.isNothing() ||
samples->mSamples.LastElement()->mTime >= mNextKeyframeTime.value().ToMicroseconds()) {
SetNextKeyFrameTime();
diff --git a/dom/media/fmp4/MP4Stream.cpp b/dom/media/fmp4/MP4Stream.cpp
index 615a7dc01..9a79cac7a 100644
--- a/dom/media/fmp4/MP4Stream.cpp
+++ b/dom/media/fmp4/MP4Stream.cpp
@@ -48,9 +48,6 @@ MP4Stream::BlockingReadIntoCache(int64_t aOffset, size_t aCount, Monitor* aToUnl
return true;
}
-// We surreptitiously reimplement the supposedly-blocking ReadAt as a non-
-// blocking CachedReadAt, and record when it fails. This allows MP4Reader
-// to retry the read as an actual blocking read without holding the lock.
bool
MP4Stream::ReadAt(int64_t aOffset, void* aBuffer, size_t aCount,
size_t* aBytesRead)
diff --git a/dom/media/fmp4/moz.build b/dom/media/fmp4/moz.build
index 6a249ae3e..a79fb0229 100644
--- a/dom/media/fmp4/moz.build
+++ b/dom/media/fmp4/moz.build
@@ -20,6 +20,3 @@ SOURCES += [
]
FINAL_LIBRARY = 'xul'
-
-if CONFIG['MOZ_GONK_MEDIACODEC']:
- DEFINES['MOZ_GONK_MEDIACODEC'] = True
diff --git a/dom/media/gmp/GMPParent.cpp b/dom/media/gmp/GMPParent.cpp
index 40c3e5141..418f14736 100644
--- a/dom/media/gmp/GMPParent.cpp
+++ b/dom/media/gmp/GMPParent.cpp
@@ -516,8 +516,7 @@ GMPCapability::Supports(const nsTArray<GMPCapability>& aCapabilities,
#ifdef XP_WIN
// Clearkey on Windows advertises that it can decode in its GMP info
// file, but uses Windows Media Foundation to decode. That's not present
- // on Windows XP, and on some Vista, Windows N, and KN variants without
- // certain services packs.
+ // on Windows N and KN variants without certain service packs.
if (tag.Equals(kEMEKeySystemClearkey)) {
if (capabilities.mAPIName.EqualsLiteral(GMP_API_VIDEO_DECODER)) {
if (!WMFDecoderModule::HasH264()) {
diff --git a/dom/media/gmp/widevine-adapter/WidevineAdapter.cpp b/dom/media/gmp/widevine-adapter/WidevineAdapter.cpp
index 74b5c38e8..57d4ecec2 100644
--- a/dom/media/gmp/widevine-adapter/WidevineAdapter.cpp
+++ b/dom/media/gmp/widevine-adapter/WidevineAdapter.cpp
@@ -46,7 +46,7 @@ void* GetCdmHost(int aHostInterfaceVersion, void* aUserData)
Log("GetCdmHostFunc(%d, %p)", aHostInterfaceVersion, aUserData);
WidevineDecryptor* decryptor = reinterpret_cast<WidevineDecryptor*>(aUserData);
MOZ_ASSERT(decryptor);
- return static_cast<cdm::Host_8*>(decryptor);
+ return static_cast<cdm::Host_9*>(decryptor);
}
#define STRINGIFY(s) _STRINGIFY(s)
@@ -106,8 +106,8 @@ WidevineAdapter::GMPGetAPI(const char* aAPIName,
WidevineDecryptor* decryptor = new WidevineDecryptor();
- auto cdm = reinterpret_cast<cdm::ContentDecryptionModule*>(
- create(cdm::ContentDecryptionModule::kVersion,
+ auto cdm = reinterpret_cast<cdm::ContentDecryptionModule_9*>(
+ create(cdm::ContentDecryptionModule_9::kVersion,
kEMEKeySystemWidevine.get(),
kEMEKeySystemWidevine.Length(),
&GetCdmHost,
@@ -161,8 +161,8 @@ WidevineAdapter::Supports(int32_t aModuleVersion,
int32_t aHostVersion)
{
return aModuleVersion == CDM_MODULE_VERSION &&
- aInterfaceVersion == cdm::ContentDecryptionModule::kVersion &&
- aHostVersion == cdm::Host_8::kVersion;
+ aInterfaceVersion == cdm::ContentDecryptionModule_9::kVersion &&
+ aHostVersion == cdm::Host_9::kVersion;
}
} // namespace mozilla
diff --git a/dom/media/gmp/widevine-adapter/WidevineDecryptor.cpp b/dom/media/gmp/widevine-adapter/WidevineDecryptor.cpp
index 149fa1701..4d3408804 100644
--- a/dom/media/gmp/widevine-adapter/WidevineDecryptor.cpp
+++ b/dom/media/gmp/widevine-adapter/WidevineDecryptor.cpp
@@ -102,7 +102,7 @@ WidevineDecryptor::CreateSession(uint32_t aCreateSessionToken,
} else {
// Invalid init data type
const char* errorMsg = "Invalid init data type when creating session.";
- OnRejectPromise(aPromiseId, kNotSupportedError, 0, errorMsg, sizeof(errorMsg));
+ OnRejectPromise(aPromiseId, kExceptionNotSupportedError, 0, errorMsg, sizeof(errorMsg));
return;
}
mPromiseIdToNewSessionTokens[aPromiseId] = aCreateSessionToken;
@@ -302,6 +302,12 @@ WidevineDecryptor::GetCurrentWallTime()
}
void
+WidevineDecryptor::OnResolveKeyStatusPromise(uint32_t aPromiseId,
+ cdm::KeyStatus aKeyStatus) {
+ // TODO: Implement the callback for GetStatusForPolicy. See Mozilla bug 1404230.
+}
+
+void
WidevineDecryptor::OnResolveNewSessionPromise(uint32_t aPromiseId,
const char* aSessionId,
uint32_t aSessionIdSize)
@@ -333,41 +339,59 @@ WidevineDecryptor::OnResolvePromise(uint32_t aPromiseId)
}
static GMPDOMException
-ToGMPDOMException(cdm::Error aError)
-{
- switch (aError) {
- case kNotSupportedError: return kGMPNotSupportedError;
- case kInvalidStateError: return kGMPInvalidStateError;
- case kInvalidAccessError:
- // Note: Chrome converts kInvalidAccessError to TypeError, since the
- // Chromium CDM API doesn't have a type error enum value. The EME spec
- // requires TypeError in some places, so we do the same conversion.
- // See bug 1313202.
- return kGMPTypeError;
- case kQuotaExceededError: return kGMPQuotaExceededError;
+ConvertCDMExceptionToGMPDOMException(cdm::Exception aException)
+{
+ switch (aException) {
+ case kExceptionNotSupportedError: return kGMPNotSupportedError;
+ case kExceptionInvalidStateError: return kGMPInvalidStateError;
+ case kExceptionTypeError: return kGMPTypeError;
+ case kExceptionQuotaExceededError: return kGMPQuotaExceededError;
case kUnknownError: return kGMPInvalidModificationError; // Note: Unique placeholder.
case kClientError: return kGMPAbortError; // Note: Unique placeholder.
case kOutputError: return kGMPSecurityError; // Note: Unique placeholder.
};
- return kGMPTimeoutError; // Note: Unique placeholder.
+ return kGMPInvalidStateError; // Note: Unique placeholder.
+}
+
+// The exceptions used by the CDM to reject promises, aligned with the spec:
+// https://w3c.github.io/encrypted-media/#exceptions
+cdm::Exception
+ConvertCDMErrorToCDMException(cdm::Error error) {
+ switch (error) {
+ case cdm::kNotSupportedError:
+ return cdm::Exception::kExceptionNotSupportedError;
+ case cdm::kInvalidStateError:
+ return cdm::Exception::kExceptionInvalidStateError;
+ case cdm::kInvalidAccessError:
+ return cdm::Exception::kExceptionTypeError;
+ case cdm::kQuotaExceededError:
+ return cdm::Exception::kExceptionQuotaExceededError;
+
+ case cdm::kUnknownError:
+ case cdm::kClientError:
+ case cdm::kOutputError:
+ break;
+ }
+
+ return cdm::Exception::kExceptionInvalidStateError;
}
void
WidevineDecryptor::OnRejectPromise(uint32_t aPromiseId,
- Error aError,
+ cdm::Exception aException,
uint32_t aSystemCode,
const char* aErrorMessage,
uint32_t aErrorMessageSize)
{
if (!mCallback) {
Log("Decryptor::OnRejectPromise(aPromiseId=%d, err=%d, sysCode=%u, msg=%s) FAIL; !mCallback",
- aPromiseId, (int)aError, aSystemCode, aErrorMessage);
+ aPromiseId, (int)aException, aSystemCode, aErrorMessage);
return;
}
Log("Decryptor::OnRejectPromise(aPromiseId=%d, err=%d, sysCode=%u, msg=%s)",
- aPromiseId, (int)aError, aSystemCode, aErrorMessage);
+ aPromiseId, (int)aException, aSystemCode, aErrorMessage);
mCallback->RejectPromise(aPromiseId,
- ToGMPDOMException(aError),
+ ConvertCDMExceptionToGMPDOMException(aException),
!aErrorMessageSize ? "" : aErrorMessage,
aErrorMessageSize);
}
@@ -386,11 +410,9 @@ ToGMPMessageType(MessageType message_type)
void
WidevineDecryptor::OnSessionMessage(const char* aSessionId,
uint32_t aSessionIdSize,
- MessageType aMessageType,
+ cdm::MessageType aMessageType,
const char* aMessage,
- uint32_t aMessageSize,
- const char* aLegacyDestinationUrl,
- uint32_t aLegacyDestinationUrlLength)
+ uint32_t aMessageSize)
{
if (!mCallback) {
Log("Decryptor::OnSessionMessage() FAIL; !mCallback");
@@ -479,28 +501,6 @@ WidevineDecryptor::OnSessionClosed(const char* aSessionId,
}
void
-WidevineDecryptor::OnLegacySessionError(const char* aSessionId,
- uint32_t aSessionIdLength,
- Error aError,
- uint32_t aSystemCode,
- const char* aErrorMessage,
- uint32_t aErrorMessageLength)
-{
- if (!mCallback) {
- Log("Decryptor::OnLegacySessionError(sid=%s, error=%d) FAIL; !mCallback",
- aSessionId, (int)aError);
- return;
- }
- Log("Decryptor::OnLegacySessionError(sid=%s, error=%d)", aSessionId, (int)aError);
- mCallback->SessionError(aSessionId,
- aSessionIdLength,
- ToGMPDOMException(aError),
- aSystemCode,
- aErrorMessage,
- aErrorMessageLength);
-}
-
-void
WidevineDecryptor::SendPlatformChallenge(const char* aServiceId,
uint32_t aServiceIdSize,
const char* aChallenge,
@@ -538,4 +538,17 @@ WidevineDecryptor::CreateFileIO(FileIOClient* aClient)
return new WidevineFileIO(aClient);
}
+void
+WidevineDecryptor::RequestStorageId(uint32_t aVersion)
+{
+ Log("Decryptor::RequestStorageId() aVersion = %u", aVersion);
+ if (aVersion >= 0x80000000) {
+ mCDM->OnStorageId(aVersion, nullptr, 0);
+ return;
+ }
+
+ // TODO: Provide a meaningful buffer instead of a dummy one.
+ mCDM->OnStorageId(aVersion, new uint8_t[1024*1024], 1024 * 1024);
+}
+
} // namespace mozilla
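
With the Host_9 interface, promises are rejected with cdm::Exception values, and any remaining legacy cdm::Error codes are first funnelled through ConvertCDMErrorToCDMException(). A minimal sketch of a call site follows, assuming it sits inside namespace mozilla next to the converter; the function name and message handling are illustrative, not part of the patch.

// Hypothetical call site: reject a promise from a path that still produces a
// legacy cdm::Error value.
#include <cstring>

void RejectWithLegacyError(WidevineDecryptor* aDecryptor,
                           uint32_t aPromiseId,
                           cdm::Error aError,
                           uint32_t aSystemCode,
                           const char* aMessage)
{
  aDecryptor->OnRejectPromise(aPromiseId,
                              ConvertCDMErrorToCDMException(aError),
                              aSystemCode,
                              aMessage,
                              static_cast<uint32_t>(strlen(aMessage)));
}
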
diff --git a/dom/media/gmp/widevine-adapter/WidevineDecryptor.h b/dom/media/gmp/widevine-adapter/WidevineDecryptor.h
index d5185192b..f291c321d 100644
--- a/dom/media/gmp/widevine-adapter/WidevineDecryptor.h
+++ b/dom/media/gmp/widevine-adapter/WidevineDecryptor.h
@@ -16,7 +16,7 @@
namespace mozilla {
class WidevineDecryptor : public GMPDecryptor
- , public cdm::Host_8
+ , public cdm::Host_9
{
public:
@@ -69,16 +69,19 @@ public:
void DecryptingComplete() override;
- // cdm::Host_8
+ // cdm::Host_9 implementation
cdm::Buffer* Allocate(uint32_t aCapacity) override;
void SetTimer(int64_t aDelayMs, void* aContext) override;
cdm::Time GetCurrentWallTime() override;
+ // cdm::Host_9 interface
+ void OnResolveKeyStatusPromise(uint32_t aPromiseId,
+ cdm::KeyStatus aKeyStatus) override;
void OnResolveNewSessionPromise(uint32_t aPromiseId,
const char* aSessionId,
uint32_t aSessionIdSize) override;
void OnResolvePromise(uint32_t aPromiseId) override;
void OnRejectPromise(uint32_t aPromiseId,
- cdm::Error aError,
+ cdm::Exception aException,
uint32_t aSystemCode,
const char* aErrorMessage,
uint32_t aErrorMessageSize) override;
@@ -86,9 +89,7 @@ public:
uint32_t aSessionIdSize,
cdm::MessageType aMessageType,
const char* aMessage,
- uint32_t aMessageSize,
- const char* aLegacyDestinationUrl,
- uint32_t aLegacyDestinationUrlLength) override;
+ uint32_t aMessageSize) override;
void OnSessionKeysChange(const char* aSessionId,
uint32_t aSessionIdSize,
bool aHasAdditionalUsableKey,
@@ -99,12 +100,6 @@ public:
cdm::Time aNewExpiryTime) override;
void OnSessionClosed(const char* aSessionId,
uint32_t aSessionIdSize) override;
- void OnLegacySessionError(const char* aSessionId,
- uint32_t aSessionId_length,
- cdm::Error aError,
- uint32_t aSystemCode,
- const char* aErrorMessage,
- uint32_t aErrorMessageLength) override;
void SendPlatformChallenge(const char* aServiceId,
uint32_t aServiceIdSize,
const char* aChallenge,
@@ -113,6 +108,9 @@ public:
void QueryOutputProtectionStatus() override;
void OnDeferredInitializationDone(cdm::StreamType aStreamType,
cdm::Status aDecoderStatus) override;
+ // cdm::Host_9 interface
+ // NOTE: the interface has changed upstream.
+ void RequestStorageId(uint32_t aVersion) override;
cdm::FileIO* CreateFileIO(cdm::FileIOClient* aClient) override;
GMPDecryptorCallback* Callback() const { return mCallback; }
@@ -120,7 +118,7 @@ public:
private:
~WidevineDecryptor();
RefPtr<CDMWrapper> mCDM;
- cdm::ContentDecryptionModule_8* CDM() { return mCDM->GetCDM(); }
+ cdm::ContentDecryptionModule_9* CDM() { return mCDM->GetCDM(); }
GMPDecryptorCallback* mCallback;
std::map<uint32_t, uint32_t> mPromiseIdToNewSessionTokens;
diff --git a/dom/media/gmp/widevine-adapter/WidevineUtils.cpp b/dom/media/gmp/widevine-adapter/WidevineUtils.cpp
index 925dfe1a1..10c6c2e18 100644
--- a/dom/media/gmp/widevine-adapter/WidevineUtils.cpp
+++ b/dom/media/gmp/widevine-adapter/WidevineUtils.cpp
@@ -43,7 +43,7 @@ ToGMPErr(cdm::Status aStatus)
case cdm::kSuccess: return GMPNoErr;
case cdm::kNeedMoreData: return GMPGenericErr;
case cdm::kNoKey: return GMPNoKeyErr;
- case cdm::kSessionError: return GMPGenericErr;
+ case cdm::kInitializationError: return GMPGenericErr;
case cdm::kDecryptError: return GMPCryptoErr;
case cdm::kDecodeError: return GMPDecodeErr;
case cdm::kDeferredInitialization: return GMPGenericErr;
@@ -77,7 +77,7 @@ void InitInputBuffer(const GMPEncryptedBufferMetadata* aCrypto,
aInputBuffer.timestamp = aTimestamp;
}
-CDMWrapper::CDMWrapper(cdm::ContentDecryptionModule_8* aCDM,
+CDMWrapper::CDMWrapper(cdm::ContentDecryptionModule_9* aCDM,
WidevineDecryptor* aDecryptor)
: mCDM(aCDM)
, mDecryptor(aDecryptor)
diff --git a/dom/media/gmp/widevine-adapter/WidevineUtils.h b/dom/media/gmp/widevine-adapter/WidevineUtils.h
index 57c004a87..2f6137fe3 100644
--- a/dom/media/gmp/widevine-adapter/WidevineUtils.h
+++ b/dom/media/gmp/widevine-adapter/WidevineUtils.h
@@ -48,12 +48,16 @@ class CDMWrapper {
public:
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(CDMWrapper)
- explicit CDMWrapper(cdm::ContentDecryptionModule_8* aCDM,
+ explicit CDMWrapper(cdm::ContentDecryptionModule_9* aCDM,
WidevineDecryptor* aDecryptor);
- cdm::ContentDecryptionModule_8* GetCDM() const { return mCDM; }
+ cdm::ContentDecryptionModule_9* GetCDM() const { return mCDM; }
+ void OnStorageId(uint32_t aVersion, const uint8_t* aStorageId,
+ uint32_t aStorageIdSize) {
+ mCDM->OnStorageId(aVersion, aStorageId, aStorageIdSize);
+ }
private:
~CDMWrapper();
- cdm::ContentDecryptionModule_8* mCDM;
+ cdm::ContentDecryptionModule_9* mCDM;
RefPtr<WidevineDecryptor> mDecryptor;
};
diff --git a/dom/media/gmp/widevine-adapter/WidevineVideoDecoder.h b/dom/media/gmp/widevine-adapter/WidevineVideoDecoder.h
index b143f75f7..f5e63519b 100644
--- a/dom/media/gmp/widevine-adapter/WidevineVideoDecoder.h
+++ b/dom/media/gmp/widevine-adapter/WidevineVideoDecoder.h
@@ -45,7 +45,7 @@ private:
~WidevineVideoDecoder();
- cdm::ContentDecryptionModule_8* CDM() const {
+ cdm::ContentDecryptionModule_9* CDM() const {
// CDM should only be accessed before 'DecodingComplete'.
MOZ_ASSERT(mCDMWrapper);
// CDMWrapper ensure the CDM is non-null, no need to check again.
diff --git a/dom/media/gmp/widevine-adapter/content_decryption_module.h b/dom/media/gmp/widevine-adapter/content_decryption_module.h
index 512ca9768..0539135fb 100644
--- a/dom/media/gmp/widevine-adapter/content_decryption_module.h
+++ b/dom/media/gmp/widevine-adapter/content_decryption_module.h
@@ -5,6 +5,8 @@
#ifndef CDM_CONTENT_DECRYPTION_MODULE_H_
#define CDM_CONTENT_DECRYPTION_MODULE_H_
+#include "content_decryption_module_export.h"
+
#if defined(_MSC_VER)
typedef unsigned char uint8_t;
typedef unsigned int uint32_t;
@@ -14,25 +16,21 @@ typedef __int64 int64_t;
#include <stdint.h>
#endif
-// Define CDM_EXPORT so that functionality implemented by the CDM module
-// can be exported to consumers.
-#if defined(WIN32)
-
-#if defined(CDM_IMPLEMENTATION)
-#define CDM_EXPORT __declspec(dllexport)
-#else
-#define CDM_EXPORT __declspec(dllimport)
-#endif // defined(CDM_IMPLEMENTATION)
-
-#else // defined(WIN32)
-
-#if defined(CDM_IMPLEMENTATION)
-#define CDM_EXPORT __attribute__((visibility("default")))
+// Define CDM_CLASS_API to export class types. We have to add visibility
+// attributes to make sure virtual tables in CDM consumer and CDM implementation
+// are the same. Generally this was always a good idea, as there are no guarantees
+// about that for internal symbols, but it has only become a practical issue after
+// the introduction of LTO devirtualization. See more details at
+// https://crbug.com/609564#c35
+#if defined(_WIN32)
+#if defined(__clang__)
+#define CDM_CLASS_API [[clang::lto_visibility_public]]
#else
-#define CDM_EXPORT
+#define CDM_CLASS_API
#endif
-
-#endif // defined(WIN32)
+#else // defined(_WIN32)
+#define CDM_CLASS_API __attribute__((visibility("default")))
+#endif // defined(_WIN32)
// The version number must be rolled when the exported functions are updated!
// If the CDM and the adapter use different versions of these functions, the
@@ -48,9 +46,9 @@ typedef __int64 int64_t;
#define BUILD_ENTRYPOINT_NO_EXPANSION(name, version) name##_##version
extern "C" {
-CDM_EXPORT void INITIALIZE_CDM_MODULE();
+CDM_API void INITIALIZE_CDM_MODULE();
-CDM_EXPORT void DeinitializeCdmModule();
+CDM_API void DeinitializeCdmModule();
// Returns a pointer to the requested CDM Host interface upon success.
// Returns NULL if the requested CDM Host interface is not supported.
@@ -65,30 +63,30 @@ typedef void* (*GetCdmHostFunc)(int host_interface_version, void* user_data);
// |cdm_interface_version|.
// Caller retains ownership of arguments and must call Destroy() on the returned
// object.
-CDM_EXPORT void* CreateCdmInstance(
+CDM_API void* CreateCdmInstance(
int cdm_interface_version,
const char* key_system, uint32_t key_system_size,
GetCdmHostFunc get_cdm_host_func, void* user_data);
-CDM_EXPORT const char* GetCdmVersion();
+CDM_API const char* GetCdmVersion();
}
namespace cdm {
-class AudioFrames;
-class DecryptedBlock;
-class VideoFrame;
+class CDM_CLASS_API AudioFrames;
+class CDM_CLASS_API DecryptedBlock;
+class CDM_CLASS_API VideoFrame;
-class Host_7;
-class Host_8;
+class CDM_CLASS_API Host_8;
+class CDM_CLASS_API Host_9;
enum Status {
kSuccess = 0,
kNeedMoreData, // Decoder needs more data to produce a decoded frame/sample.
- kNoKey, // The required decryption key is not available.
- kSessionError, // Session management error.
- kDecryptError, // Decryption failed.
- kDecodeError, // Error decoding audio or video.
+ kNoKey, // The required decryption key is not available.
+ kInitializationError, // Initialization error.
+ kDecryptError, // Decryption failed.
+ kDecodeError, // Error decoding audio or video.
kDeferredInitialization // Decoder is not ready for initialization.
};
@@ -97,6 +95,7 @@ enum Status {
// The following starts with the list of DOM4 exceptions from:
// http://www.w3.org/TR/dom/#domexception
// Some DOM4 exceptions are not included as they are not expected to be used.
+// Should only be used with Host_8 and earlier.
enum Error {
kNotSupportedError = 9,
kInvalidStateError = 11,
@@ -113,8 +112,20 @@ enum Error {
kOutputError = 101
};
-// Time is defined as the number of seconds since the
-// Epoch (00:00:00 UTC, January 1, 1970).
+// Exceptions used by the CDM to reject promises.
+// https://w3c.github.io/encrypted-media/#exceptions
+enum Exception {
+ kExceptionTypeError,
+ kExceptionNotSupportedError,
+ kExceptionInvalidStateError,
+ kExceptionQuotaExceededError
+};
+
+// Time is defined as the number of seconds since the Epoch
+// (00:00:00 UTC, January 1, 1970), not including any added leap second.
+// Also see Time definition in spec: https://w3c.github.io/encrypted-media/#time
+// Note that Time is defined in millisecond accuracy in the spec but in second
+// accuracy here.
typedef double Time;
// An input buffer can be split into several continuous subsamples.
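
Since the spec expresses times in milliseconds while cdm::Time carries seconds, conversions at this boundary are easy to get wrong. A small pair of hypothetical helpers (the names are illustrative only):

#include "content_decryption_module.h"

static cdm::Time MillisecondsToCdmTime(double aMilliseconds)
{
  return aMilliseconds / 1000.0;
}

static double CdmTimeToMilliseconds(cdm::Time aSeconds)
{
  return aSeconds * 1000.0;
}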
@@ -151,13 +162,13 @@ struct SubsampleEntry {
// unencrypted.
struct InputBuffer {
InputBuffer()
- : data(NULL),
+ : data(nullptr),
data_size(0),
- key_id(NULL),
+ key_id(nullptr),
key_id_size(0),
- iv(NULL),
+ iv(nullptr),
iv_size(0),
- subsamples(NULL),
+ subsamples(nullptr),
num_subsamples(0),
timestamp(0) {}
@@ -188,7 +199,7 @@ struct AudioDecoderConfig {
channel_count(0),
bits_per_channel(0),
samples_per_second(0),
- extra_data(NULL),
+ extra_data(nullptr),
extra_data_size(0) {}
AudioCodec codec;
@@ -214,10 +225,25 @@ enum AudioFormat {
};
// Surface formats based on FOURCC labels, see: http://www.fourcc.org/yuv.php
+// Values are chosen to be consistent with Chromium's VideoPixelFormat values.
enum VideoFormat {
kUnknownVideoFormat = 0, // Unknown format value. Used for error reporting.
- kYv12, // 12bpp YVU planar 1x1 Y, 2x2 VU samples.
- kI420 // 12bpp YVU planar 1x1 Y, 2x2 UV samples.
+ kYv12 = 1, // 12bpp YVU planar 1x1 Y, 2x2 VU samples.
+ kI420 = 2, // 12bpp YUV planar 1x1 Y, 2x2 UV samples.
+
+ // In the following formats, each sample uses 16-bit in storage, while the
+ // sample value is stored in the least significant N bits where N is
+ // specified by the number after "P". For example, for YUV420P9, each Y, U,
+ // and V sample is stored in the least significant 9 bits in a 2-byte block.
+ kYUV420P9 = 16,
+ kYUV420P10 = 17,
+ kYUV422P9 = 18,
+ kYUV422P10 = 19,
+ kYUV444P9 = 20,
+ kYUV444P10 = 21,
+ kYUV420P12 = 22,
+ kYUV422P12 = 23,
+ kYUV444P12 = 24,
};
struct Size {
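
For the new 16-bit storage formats, only the low N bits of each 16-bit word carry the sample value. A hypothetical accessor for a 10-bit plane such as kYUV420P10; the plane pointer, stride and coordinates are assumed to come from whoever owns the frame:

#include <stdint.h>

// Hypothetical accessor: read one sample from a plane stored in a
// 16-bit-per-sample format where only the low 10 bits carry the value
// (e.g. cdm::kYUV420P10).
static uint16_t ReadP10Sample(const uint8_t* aPlane, uint32_t aStride,
                              uint32_t aRow, uint32_t aCol)
{
  const uint16_t* row =
      reinterpret_cast<const uint16_t*>(aPlane + aRow * aStride);
  return row[aCol] & 0x3FF;  // Mask to the least significant 10 bits.
}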
@@ -245,14 +271,19 @@ struct VideoDecoderConfig {
kH264ProfileHigh,
kH264ProfileHigh10,
kH264ProfileHigh422,
- kH264ProfileHigh444Predictive
+ kH264ProfileHigh444Predictive,
+ // VP9 Profiles are only passed in starting from CDM_9.
+ kVP9Profile0,
+ kVP9Profile1,
+ kVP9Profile2,
+ kVP9Profile3
};
VideoDecoderConfig()
: codec(kUnknownVideoCodec),
profile(kUnknownVideoCodecProfile),
format(kUnknownVideoFormat),
- extra_data(NULL),
+ extra_data(nullptr),
extra_data_size(0) {}
VideoCodec codec;
@@ -294,7 +325,7 @@ struct PlatformChallengeResponse {
// Used when passing arrays of binary data. Does not own the referenced data.
struct BinaryData {
- BinaryData() : data(NULL), length(0) {}
+ BinaryData() : data(nullptr), length(0) {}
const uint8_t* data;
uint32_t length;
};
@@ -316,7 +347,10 @@ enum KeyStatus {
// should be 0 when |status| == kUsable.
struct KeyInformation {
KeyInformation()
- : key_id(NULL), key_id_size(0), status(kInternalError), system_code(0) {}
+ : key_id(nullptr),
+ key_id_size(0),
+ status(kInternalError),
+ system_code(0) {}
const uint8_t* key_id;
uint32_t key_id_size;
KeyStatus status;
@@ -372,6 +406,24 @@ enum MessageType {
kLicenseRelease = 2
};
+enum HdcpVersion {
+ kHdcpVersionNone,
+ kHdcpVersion1_0,
+ kHdcpVersion1_1,
+ kHdcpVersion1_2,
+ kHdcpVersion1_3,
+ kHdcpVersion1_4,
+ kHdcpVersion2_0,
+ kHdcpVersion2_1,
+ kHdcpVersion2_2
+};
+
+struct Policy {
+ Policy() : min_hdcp_version(kHdcpVersionNone) {}
+
+ HdcpVersion min_hdcp_version;
+};
+
// FileIO interface provides a way for the CDM to store data in a file in
// persistent storage. This interface aims only at providing basic read/write
// capabilities and should not be used as a full-fledged file IO API.
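
The Policy struct added above feeds ContentDecryptionModule_9::GetStatusForPolicy() further down in this header. A minimal caller-side sketch, assuming a CDM_9 instance and promise-id bookkeeping that live elsewhere:

#include "content_decryption_module.h"

// Minimal sketch: ask the CDM whether a hypothetical key limited to HDCP 2.2
// output would be usable. Promise-id bookkeeping is assumed to live elsewhere.
static void QueryHdcp22Status(cdm::ContentDecryptionModule_9* aCdm,
                              uint32_t aPromiseId)
{
  cdm::Policy policy;
  policy.min_hdcp_version = cdm::kHdcpVersion2_2;
  // The CDM answers via Host_9::OnResolveKeyStatusPromise() or, on error,
  // Host_9::OnRejectPromise().
  aCdm->GetStatusForPolicy(aPromiseId, policy);
}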
@@ -381,7 +433,7 @@ enum MessageType {
// Note to implementors of this interface:
// Per-origin storage and the ability for users to clear it are important.
// See http://www.w3.org/TR/encrypted-media/#privacy-storedinfo.
-class FileIO {
+class CDM_CLASS_API FileIO {
public:
// Opens the file with |file_name| for read and write.
// FileIOClient::OnOpenComplete() will be called after the opening
@@ -389,8 +441,9 @@ class FileIO {
// - When the file is opened by a CDM instance, it will be classified as "in
// use". In this case other CDM instances in the same domain may receive
// kInUse status when trying to open it.
- // - |file_name| must not contain forward slash ('/') or backslash ('\'), and
- // must not start with an underscore ('_').
+ // - |file_name| must only contain letters (A-Za-z), digits (0-9), or "._-".
+ // It must not start with an underscore ('_'), and must be at least 1
+ // character and no more than 256 characters long.
virtual void Open(const char* file_name, uint32_t file_name_size) = 0;
// Reads the contents of the file. FileIOClient::OnReadComplete() will be
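
The tightened |file_name| rules for Open() above translate directly into a validation routine. A hypothetical checker matching the documented constraints (letters, digits, '.', '_' or '-'; no leading underscore; 1-256 characters):

#include <cctype>
#include <stdint.h>

// Hypothetical validator mirroring the documented Open() rules.
static bool IsValidCdmFileName(const char* aName, uint32_t aNameSize)
{
  if (!aName || aNameSize < 1 || aNameSize > 256 || aName[0] == '_') {
    return false;
  }
  for (uint32_t i = 0; i < aNameSize; i++) {
    unsigned char c = static_cast<unsigned char>(aName[i]);
    if (!std::isalnum(c) && c != '.' && c != '_' && c != '-') {
      return false;
    }
  }
  return true;
}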
@@ -421,7 +474,7 @@ class FileIO {
// When kError is returned, the FileIO object could be in an error state. All
// following calls (other than Close()) could return kError. The CDM should
// still call Close() to destroy the FileIO object.
-class FileIOClient {
+class CDM_CLASS_API FileIOClient {
public:
enum Status {
kSuccess = 0,
@@ -462,10 +515,20 @@ class FileIOClient {
// provided in CreateCdmInstance() to allocate any Buffer that needs to
// be passed back to the caller. Implementations must call Buffer::Destroy()
// when a Buffer is created that will never be returned to the caller.
-class ContentDecryptionModule_7 {
+class CDM_CLASS_API ContentDecryptionModule_8 {
public:
- static const int kVersion = 7;
- typedef Host_7 Host;
+ static const int kVersion = 8;
+ typedef Host_8 Host;
+
+ // Initializes the CDM instance, providing information about permitted
+ // functionalities.
+ // If |allow_distinctive_identifier| is false, messages from the CDM,
+ // such as message events, must not contain a Distinctive Identifier,
+ // even in an encrypted form.
+ // If |allow_persistent_state| is false, the CDM must not attempt to
+ // persist state. Calls to CreateFileIO() will fail.
+ virtual void Initialize(bool allow_distinctive_identifier,
+ bool allow_persistent_state) = 0;
// SetServerCertificate(), CreateSessionAndGenerateRequest(), LoadSession(),
// UpdateSession(), CloseSession(), and RemoveSession() all accept a
@@ -484,8 +547,7 @@ class ContentDecryptionModule_7 {
// or Host::OnRejectPromise().
virtual void CreateSessionAndGenerateRequest(uint32_t promise_id,
SessionType session_type,
- const char* init_data_type,
- uint32_t init_data_type_size,
+ InitDataType init_data_type,
const uint8_t* init_data,
uint32_t init_data_size) = 0;
@@ -631,8 +693,8 @@ class ContentDecryptionModule_7 {
virtual void Destroy() = 0;
protected:
- ContentDecryptionModule_7() {}
- virtual ~ContentDecryptionModule_7() {}
+ ContentDecryptionModule_8() {}
+ virtual ~ContentDecryptionModule_8() {}
};
// ContentDecryptionModule interface that all CDMs need to implement.
@@ -641,10 +703,10 @@ class ContentDecryptionModule_7 {
// provided in CreateCdmInstance() to allocate any Buffer that needs to
// be passed back to the caller. Implementations must call Buffer::Destroy()
// when a Buffer is created that will never be returned to the caller.
-class ContentDecryptionModule_8 {
+class CDM_CLASS_API ContentDecryptionModule_9 {
public:
- static const int kVersion = 8;
- typedef Host_8 Host;
+ static const int kVersion = 9;
+ typedef Host_9 Host;
// Initializes the CDM instance, providing information about permitted
// functionalities.
@@ -656,6 +718,13 @@ class ContentDecryptionModule_8 {
virtual void Initialize(bool allow_distinctive_identifier,
bool allow_persistent_state) = 0;
+ // Gets the key status if the CDM has a hypothetical key with the |policy|.
+ // The CDM must respond by calling either Host::OnResolveKeyStatusPromise()
+ // with the result key status or Host::OnRejectPromise() if an unexpected
+ // error happened or this method is not supported.
+ virtual void GetStatusForPolicy(uint32_t promise_id,
+ const Policy& policy) = 0;
+
// SetServerCertificate(), CreateSessionAndGenerateRequest(), LoadSession(),
// UpdateSession(), CloseSession(), and RemoveSession() all accept a
// |promise_id|, which must be passed to the completion Host method
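
On the CDM side, GetStatusForPolicy() resolves through the new Host_9::OnResolveKeyStatusPromise() callback. A hypothetical handler, assuming the CDM tracks the highest HDCP version its output path supports and using the standard kUsable / kOutputRestricted key statuses:

#include "content_decryption_module.h"

// Hypothetical CDM-side handler. |aMaxSupportedHdcp| stands in for whatever
// output-protection state a real CDM tracks.
static void HandleGetStatusForPolicy(cdm::Host_9* aHost, uint32_t aPromiseId,
                                     const cdm::Policy& aPolicy,
                                     cdm::HdcpVersion aMaxSupportedHdcp)
{
  cdm::KeyStatus status = aPolicy.min_hdcp_version <= aMaxSupportedHdcp
                            ? cdm::kUsable
                            : cdm::kOutputRestricted;
  aHost->OnResolveKeyStatusPromise(aPromiseId, status);
}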
@@ -731,8 +800,8 @@ class ContentDecryptionModule_8 {
//
// Returns kSuccess if the |audio_decoder_config| is supported and the CDM
// audio decoder is successfully initialized.
- // Returns kSessionError if |audio_decoder_config| is not supported. The CDM
- // may still be able to do Decrypt().
+ // Returns kInitializationError if |audio_decoder_config| is not supported.
+ // The CDM may still be able to do Decrypt().
// Returns kDeferredInitialization if the CDM is not ready to initialize the
// decoder at this time. Must call Host::OnDeferredInitializationDone() once
// initialization is complete.
@@ -744,8 +813,8 @@ class ContentDecryptionModule_8 {
//
// Returns kSuccess if the |video_decoder_config| is supported and the CDM
// video decoder is successfully initialized.
- // Returns kSessionError if |video_decoder_config| is not supported. The CDM
- // may still be able to do Decrypt().
+ // Returns kInitializationError if |video_decoder_config| is not supported.
+ // The CDM may still be able to do Decrypt().
// Returns kDeferredInitialization if the CDM is not ready to initialize the
// decoder at this time. Must call Host::OnDeferredInitializationDone() once
// initialization is complete.
@@ -815,18 +884,30 @@ class ContentDecryptionModule_8 {
uint32_t link_mask,
uint32_t output_protection_mask) = 0;
+ // Called by the host after a call to Host::RequestStorageId(). If the
+ // version of the storage ID requested is available, |storage_id| and
+ // |storage_id_size| are set appropriately. |version| will be the same as
+ // what was requested, unless 0 (latest) was requested, in which case
+ // |version| will be the actual version number for the |storage_id| returned.
+ // If the requested version is not available, null/zero will be provided as
+ // |storage_id| and |storage_id_size|, respectively, and |version| should be
+ // ignored.
+ virtual void OnStorageId(uint32_t version,
+ const uint8_t* storage_id,
+ uint32_t storage_id_size) = 0;
+
// Destroys the object in the same context as it was created.
virtual void Destroy() = 0;
protected:
- ContentDecryptionModule_8() {}
- virtual ~ContentDecryptionModule_8() {}
+ ContentDecryptionModule_9() {}
+ virtual ~ContentDecryptionModule_9() {}
};
-typedef ContentDecryptionModule_8 ContentDecryptionModule;
+typedef ContentDecryptionModule_9 ContentDecryptionModule;
// Represents a buffer created by Allocator implementations.
-class Buffer {
+class CDM_CLASS_API Buffer {
public:
// Destroys the buffer in the same context as it was created.
virtual void Destroy() = 0;
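
On the receiving end, the OnStorageId() contract described above boils down to: a non-null |storage_id| with a non-zero size is valid, anything else means the requested version is unavailable. A hypothetical CDM-side cache illustrating that handling:

#include "content_decryption_module.h"

#include <vector>

// Hypothetical CDM-side cache for the storage ID delivered via OnStorageId().
// A null/zero pair means the requested version is unavailable.
struct StorageIdCache {
  uint32_t version = 0;
  std::vector<uint8_t> id;

  void OnStorageId(uint32_t aVersion, const uint8_t* aStorageId,
                   uint32_t aStorageIdSize) {
    if (!aStorageId || aStorageIdSize == 0) {
      id.clear();  // Requested version unavailable; ignore |aVersion|.
      return;
    }
    version = aVersion;
    id.assign(aStorageId, aStorageId + aStorageIdSize);
  }
};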
@@ -845,9 +926,9 @@ class Buffer {
void operator=(const Buffer&);
};
-class Host_7 {
+class CDM_CLASS_API Host_8 {
public:
- static const int kVersion = 7;
+ static const int kVersion = 8;
// Returns a Buffer* containing non-zero members upon success, or NULL on
// failure. The caller owns the Buffer* after this call. The buffer is not
@@ -859,7 +940,7 @@ class Host_7 {
// from now with |context|.
virtual void SetTimer(int64_t delay_ms, void* context) = 0;
- // Returns the current wall time in seconds.
+ // Returns the current wall time.
virtual Time GetCurrentWallTime() = 0;
// Called by the CDM when a session is created or loaded and the value for the
@@ -917,8 +998,10 @@ class Host_7 {
// session |session_id|. This can happen as the result of an Update() call
// or some other event. If this happens as a result of a call to Update(),
// it must be called before resolving the Update() promise. |new_expiry_time|
- // can be 0 to represent "undefined". Size parameter should not include
- // null termination.
+ // represents the time after which the key(s) in the session will no longer
+ // be usable for decryption. It can be 0 if no such time exists or if the
+ // license explicitly never expires. Size parameter should not include null
+ // termination.
virtual void OnExpirationChange(const char* session_id,
uint32_t session_id_size,
Time new_expiry_time) = 0;
@@ -978,13 +1061,13 @@ class Host_7 {
virtual FileIO* CreateFileIO(FileIOClient* client) = 0;
protected:
- Host_7() {}
- virtual ~Host_7() {}
+ Host_8() {}
+ virtual ~Host_8() {}
};
-class Host_8 {
+class CDM_CLASS_API Host_9 {
public:
- static const int kVersion = 8;
+ static const int kVersion = 9;
// Returns a Buffer* containing non-zero members upon success, or NULL on
// failure. The caller owns the Buffer* after this call. The buffer is not
@@ -996,9 +1079,14 @@ class Host_8 {
// from now with |context|.
virtual void SetTimer(int64_t delay_ms, void* context) = 0;
- // Returns the current wall time in seconds.
+ // Returns the current wall time.
virtual Time GetCurrentWallTime() = 0;
+ // Called by the CDM when a key status is available in response to
+ // GetStatusForPolicy().
+ virtual void OnResolveKeyStatusPromise(uint32_t promise_id,
+ KeyStatus key_status) = 0;
+
// Called by the CDM when a session is created or loaded and the value for the
// MediaKeySession's sessionId attribute is available (|session_id|).
// This must be called before OnSessionMessage() or
@@ -1016,26 +1104,21 @@ class Host_8 {
// Called by the CDM when an error occurs as a result of one of the
// ContentDecryptionModule calls that accept a |promise_id|.
- // |error| must be specified, |error_message| and |system_code|
+ // |exception| must be specified. |error_message| and |system_code|
// are optional. |error_message_size| should not include null termination.
virtual void OnRejectPromise(uint32_t promise_id,
- Error error,
+ Exception exception,
uint32_t system_code,
const char* error_message,
uint32_t error_message_size) = 0;
// Called by the CDM when it has a message for session |session_id|.
// Size parameters should not include null termination.
- // |legacy_destination_url| is only for supporting the prefixed EME API and
- // is ignored by unprefixed EME. It should only be non-null if |message_type|
- // is kLicenseRenewal.
virtual void OnSessionMessage(const char* session_id,
uint32_t session_id_size,
MessageType message_type,
const char* message,
- uint32_t message_size,
- const char* legacy_destination_url,
- uint32_t legacy_destination_url_length) = 0;
+ uint32_t message_size) = 0;
// Called by the CDM when there has been a change in keys or their status for
// session |session_id|. |has_additional_usable_key| should be set if a
@@ -1054,8 +1137,10 @@ class Host_8 {
// session |session_id|. This can happen as the result of an Update() call
// or some other event. If this happens as a result of a call to Update(),
// it must be called before resolving the Update() promise. |new_expiry_time|
- // can be 0 to represent "undefined". Size parameter should not include
- // null termination.
+ // represents the time after which the key(s) in the session will no longer
+ // be usable for decryption. It can be 0 if no such time exists or if the
+ // license explicitly never expires. Size parameter should not include null
+ // termination.
virtual void OnExpirationChange(const char* session_id,
uint32_t session_id_size,
Time new_expiry_time) = 0;
@@ -1065,21 +1150,6 @@ class Host_8 {
virtual void OnSessionClosed(const char* session_id,
uint32_t session_id_size) = 0;
- // Called by the CDM when an error occurs in session |session_id|
- // unrelated to one of the ContentDecryptionModule calls that accept a
- // |promise_id|. |error| must be specified, |error_message| and
- // |system_code| are optional. Length parameters should not include null
- // termination.
- // Note:
- // - This method is only for supporting prefixed EME API.
- // - This method will be ignored by unprefixed EME. All errors reported
- // in this method should probably also be reported by one of other methods.
- virtual void OnLegacySessionError(
- const char* session_id, uint32_t session_id_length,
- Error error,
- uint32_t system_code,
- const char* error_message, uint32_t error_message_length) = 0;
-
// The following are optional methods that may not be implemented on all
// platforms.
@@ -1114,13 +1184,22 @@ class Host_8 {
// CDM can call this method multiple times to operate on different files.
virtual FileIO* CreateFileIO(FileIOClient* client) = 0;
+ // Requests a specific version of the storage ID. A storage ID is a stable,
+ // device-specific ID used by the CDM to securely store persistent data. The
+ // ID will be returned by the host via ContentDecryptionModule::OnStorageId().
+ // If |version| is 0, the latest version will be returned. All |version|s
+ // that are greater than or equal to 0x80000000 are reserved for the CDM and
+ // should not be supported or returned by the host. The CDM must not expose
+ // the ID outside the client device, even in encrypted form.
+ virtual void RequestStorageId(uint32_t version) = 0;
+
protected:
- Host_8() {}
- virtual ~Host_8() {}
+ Host_9() {}
+ virtual ~Host_9() {}
};
// Represents a decrypted block that has not been decoded.
-class DecryptedBlock {
+class CDM_CLASS_API DecryptedBlock {
public:
virtual void SetDecryptedBuffer(Buffer* buffer) = 0;
virtual Buffer* DecryptedBuffer() = 0;
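
Host-side, RequestStorageId() has to honour the reserved range noted above: versions at or above 0x80000000 belong to the CDM and must be answered as unavailable. A hypothetical handler; LookUpStorageId() stands in for whatever platform facility actually produces the ID:

#include "content_decryption_module.h"

#include <vector>

// Hypothetical platform hook, declared only so the sketch is complete. On
// return, |*aInOutVersion| holds the actual version when 0 (latest) was asked.
std::vector<uint8_t> LookUpStorageId(uint32_t* aInOutVersion);

static void HandleRequestStorageId(cdm::ContentDecryptionModule_9* aCdm,
                                   uint32_t aVersion)
{
  if (aVersion >= 0x80000000u) {
    aCdm->OnStorageId(aVersion, nullptr, 0);  // Reserved for the CDM.
    return;
  }
  uint32_t actualVersion = aVersion;
  std::vector<uint8_t> id = LookUpStorageId(&actualVersion);
  if (id.empty()) {
    aCdm->OnStorageId(aVersion, nullptr, 0);  // Version unavailable.
    return;
  }
  aCdm->OnStorageId(actualVersion, id.data(),
                    static_cast<uint32_t>(id.size()));
}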
@@ -1135,7 +1214,7 @@ class DecryptedBlock {
virtual ~DecryptedBlock() {}
};
-class VideoFrame {
+class CDM_CLASS_API VideoFrame {
public:
enum VideoPlane {
kYPlane = 0,
@@ -1178,7 +1257,7 @@ class VideoFrame {
//
// |<----------------- AudioFrames ------------------>|
// | audio buffer 0 | audio buffer 1 | audio buffer 2 |
-class AudioFrames {
+class CDM_CLASS_API AudioFrames {
public:
virtual void SetFrameBuffer(Buffer* buffer) = 0;
virtual Buffer* FrameBuffer() = 0;
diff --git a/dom/media/gmp/widevine-adapter/content_decryption_module_export.h b/dom/media/gmp/widevine-adapter/content_decryption_module_export.h
new file mode 100644
index 000000000..51d485892
--- /dev/null
+++ b/dom/media/gmp/widevine-adapter/content_decryption_module_export.h
@@ -0,0 +1,22 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CDM_CONTENT_DECRYPTION_MODULE_EXPORT_H_
+#define CDM_CONTENT_DECRYPTION_MODULE_EXPORT_H_
+
+// Define CDM_API so that functionality implemented by the CDM module
+// can be exported to consumers.
+#if defined(_WIN32)
+
+#if defined(CDM_IMPLEMENTATION)
+#define CDM_API __declspec(dllexport)
+#else
+#define CDM_API __declspec(dllimport)
+#endif // defined(CDM_IMPLEMENTATION)
+
+#else // defined(_WIN32)
+#define CDM_API __attribute__((visibility("default")))
+#endif // defined(_WIN32)
+
+#endif // CDM_CONTENT_DECRYPTION_MODULE_EXPORT_H_
diff --git a/dom/media/gmp/widevine-adapter/content_decryption_module_ext.h b/dom/media/gmp/widevine-adapter/content_decryption_module_ext.h
new file mode 100644
index 000000000..5df8344e6
--- /dev/null
+++ b/dom/media/gmp/widevine-adapter/content_decryption_module_ext.h
@@ -0,0 +1,64 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CDM_CONTENT_DECRYPTION_MODULE_EXT_H_
+#define CDM_CONTENT_DECRYPTION_MODULE_EXT_H_
+
+#if defined(_WIN32)
+#include <windows.h>
+#endif
+
+#include "content_decryption_module_export.h"
+
+#if defined(_MSC_VER)
+typedef unsigned int uint32_t;
+#else
+#include <stdint.h>
+#endif
+
+namespace cdm {
+
+#if defined(_WIN32)
+typedef wchar_t FilePathCharType;
+typedef HANDLE PlatformFile;
+const PlatformFile kInvalidPlatformFile = INVALID_HANDLE_VALUE;
+#else
+typedef char FilePathCharType;
+typedef int PlatformFile;
+const PlatformFile kInvalidPlatformFile = -1;
+#endif // defined(_WIN32)
+
+struct HostFile {
+ HostFile(const FilePathCharType* file_path,
+ PlatformFile file,
+ PlatformFile sig_file)
+ : file_path(file_path), file(file), sig_file(sig_file) {}
+
+ // File that is part of the host of the CDM.
+ const FilePathCharType* file_path = nullptr;
+ PlatformFile file = kInvalidPlatformFile;
+
+ // Signature file for |file|.
+ PlatformFile sig_file = kInvalidPlatformFile;
+};
+
+} // namespace cdm
+
+extern "C" {
+
+// Functions in this file are dynamically retrieved by their versioned function
+// names. Increment the version number for any backward incompatible API
+// changes.
+
+// Verifies CDM host. All files in |host_files| are opened in read-only mode.
+//
+// Returns false and closes all files if there is an immediate failure.
+// Otherwise returns true as soon as possible and processes the files
+// asynchronously. All files MUST be closed by the CDM after this one-time
+// processing is finished.
+CDM_API bool VerifyCdmHost_0(const cdm::HostFile* host_files,
+ uint32_t num_files);
+}
+
+#endif // CDM_CONTENT_DECRYPTION_MODULE_EXT_H_
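
VerifyCdmHost_0() is resolved by its versioned name and owns every PlatformFile it is handed. A skeleton of the expected shape, assuming the real signature verification happens asynchronously elsewhere; this sketch only honours the close-every-file contract:

#include "content_decryption_module_ext.h"

#if !defined(_WIN32)
#include <unistd.h>
#endif

// Skeleton only: a real CDM verifies |file| against |sig_file| asynchronously.
CDM_API bool VerifyCdmHost_0(const cdm::HostFile* host_files,
                             uint32_t num_files)
{
  if (!host_files && num_files) {
    return false;
  }
  auto closeFile = [](cdm::PlatformFile f) {
    if (f == cdm::kInvalidPlatformFile) {
      return;
    }
#if defined(_WIN32)
    CloseHandle(f);
#else
    close(f);
#endif
  };
  for (uint32_t i = 0; i < num_files; i++) {
    closeFile(host_files[i].file);
    closeFile(host_files[i].sig_file);
  }
  return true;
}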
diff --git a/dom/media/gtest/Cargo.toml b/dom/media/gtest/Cargo.toml
deleted file mode 100644
index a55f8fb68..000000000
--- a/dom/media/gtest/Cargo.toml
+++ /dev/null
@@ -1,7 +0,0 @@
-[package]
-name = "mp4parse-gtest"
-version = "0.1.0"
-authors = ["nobody@mozilla.org"]
-
-[lib]
-path = "hello.rs"
diff --git a/dom/media/gtest/TestMP3Demuxer.cpp b/dom/media/gtest/TestMP3Demuxer.cpp
index 8d2109f00..934acb60e 100644
--- a/dom/media/gtest/TestMP3Demuxer.cpp
+++ b/dom/media/gtest/TestMP3Demuxer.cpp
@@ -11,7 +11,6 @@
#include "MockMediaResource.h"
using namespace mozilla;
-using namespace mozilla::mp3;
using media::TimeUnit;
diff --git a/dom/media/gtest/TestMP4Reader.cpp b/dom/media/gtest/TestMP4Reader.cpp
deleted file mode 100644
index f08f7a40d..000000000
--- a/dom/media/gtest/TestMP4Reader.cpp
+++ /dev/null
@@ -1,217 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include "gtest/gtest.h"
-#include "MP4Reader.h"
-#include "MP4Decoder.h"
-#include "mozilla/SharedThreadPool.h"
-#include "MockMediaResource.h"
-#include "MockMediaDecoderOwner.h"
-#include "mozilla/Preferences.h"
-#include "TimeUnits.h"
-
-using namespace mozilla;
-using namespace mozilla::dom;
-
-class TestBinding
-{
-public:
- NS_INLINE_DECL_THREADSAFE_REFCOUNTING(TestBinding);
-
- RefPtr<MP4Decoder> decoder;
- RefPtr<MockMediaResource> resource;
- RefPtr<MP4Reader> reader;
-
- explicit TestBinding(const char* aFileName = "gizmo.mp4")
- : decoder(new MP4Decoder())
- , resource(new MockMediaResource(aFileName))
- , reader(new MP4Reader(decoder))
- {
- EXPECT_EQ(NS_OK, Preferences::SetBool(
- "media.use-blank-decoder", true));
-
- EXPECT_EQ(NS_OK, resource->Open(nullptr));
- decoder->SetResource(resource);
-
- reader->Init(nullptr);
- // This needs to be done before invoking GetBuffered. This is normally
- // done by MediaDecoderStateMachine.
- reader->DispatchSetStartTime(0);
- }
-
- void Init() {
- nsCOMPtr<nsIThread> thread;
- nsCOMPtr<nsIRunnable> r = NewRunnableMethod(this, &TestBinding::ReadMetadata);
- nsresult rv = NS_NewThread(getter_AddRefs(thread), r);
- EXPECT_EQ(NS_OK, rv);
- thread->Shutdown();
- }
-
-private:
- virtual ~TestBinding()
- {
- {
- RefPtr<TaskQueue> queue = reader->OwnerThread();
- nsCOMPtr<nsIRunnable> task = NewRunnableMethod(reader, &MP4Reader::Shutdown);
- // Hackily bypass the tail dispatcher so that we can AwaitShutdownAndIdle.
- // In production code we'd use BeginShutdown + promises.
- queue->Dispatch(task.forget(), AbstractThread::AssertDispatchSuccess,
- AbstractThread::TailDispatch);
- queue->AwaitShutdownAndIdle();
- }
- decoder = nullptr;
- resource = nullptr;
- reader = nullptr;
- SharedThreadPool::SpinUntilEmpty();
- }
-
- void ReadMetadata()
- {
- MediaInfo info;
- MetadataTags* tags;
- EXPECT_EQ(NS_OK, reader->ReadMetadata(&info, &tags));
- }
-};
-
-TEST(MP4Reader, BufferedRange)
-{
- RefPtr<TestBinding> b = new TestBinding();
- b->Init();
-
- // Video 3-4 sec, audio 2.986666-4.010666 sec
- b->resource->MockAddBufferedRange(248400, 327455);
-
- media::TimeIntervals ranges = b->reader->GetBuffered();
- EXPECT_EQ(1U, ranges.Length());
- EXPECT_NEAR(270000 / 90000.0, ranges.Start(0).ToSeconds(), 0.000001);
- EXPECT_NEAR(360000 / 90000.0, ranges.End(0).ToSeconds(), 0.000001);
-}
-
-TEST(MP4Reader, BufferedRangeMissingLastByte)
-{
- RefPtr<TestBinding> b = new TestBinding();
- b->Init();
-
- // Dropping the last byte of the video
- b->resource->MockClearBufferedRanges();
- b->resource->MockAddBufferedRange(248400, 324912);
- b->resource->MockAddBufferedRange(324913, 327455);
-
- media::TimeIntervals ranges = b->reader->GetBuffered();
- EXPECT_EQ(1U, ranges.Length());
- EXPECT_NEAR(270000.0 / 90000.0, ranges.Start(0).ToSeconds(), 0.000001);
- EXPECT_NEAR(357000 / 90000.0, ranges.End(0).ToSeconds(), 0.000001);
-}
-
-TEST(MP4Reader, BufferedRangeSyncFrame)
-{
- RefPtr<TestBinding> b = new TestBinding();
- b->Init();
-
- // Check that missing the first byte at 2 seconds skips right through to 3
- // seconds because of a missing sync frame
- b->resource->MockClearBufferedRanges();
- b->resource->MockAddBufferedRange(146336, 327455);
-
- media::TimeIntervals ranges = b->reader->GetBuffered();
- EXPECT_EQ(1U, ranges.Length());
- EXPECT_NEAR(270000.0 / 90000.0, ranges.Start(0).ToSeconds(), 0.000001);
- EXPECT_NEAR(360000 / 90000.0, ranges.End(0).ToSeconds(), 0.000001);
-}
-
-TEST(MP4Reader, CompositionOrder)
-{
- RefPtr<TestBinding> b = new TestBinding("mediasource_test.mp4");
- b->Init();
-
- // The first 5 video samples of this file are:
- // Video timescale=2500
- // Frame Start Size Time Duration Sync
- // 1 48 5455 166 83 Yes
- // 2 5503 145 249 83
- // 3 6228 575 581 83
- // 4 7383 235 415 83
- // 5 8779 183 332 83
- // 6 9543 191 498 83
- //
- // Audio timescale=44100
- // 1 5648 580 0 1024 Yes
- // 2 6803 580 1024 1058 Yes
- // 3 7618 581 2082 1014 Yes
- // 4 8199 580 3096 1015 Yes
- // 5 8962 581 4111 1014 Yes
- // 6 9734 580 5125 1014 Yes
- // 7 10314 581 6139 1059 Yes
- // 8 11207 580 7198 1014 Yes
- // 9 12035 581 8212 1014 Yes
- // 10 12616 580 9226 1015 Yes
- // 11 13220 581 10241 1014 Yes
-
- b->resource->MockClearBufferedRanges();
- // First two frames in decoding + first audio frame
- b->resource->MockAddBufferedRange(48, 5503); // Video 1
- b->resource->MockAddBufferedRange(5503, 5648); // Video 2
- b->resource->MockAddBufferedRange(6228, 6803); // Video 3
-
- // Audio - 5 frames; 0 - 139206 us
- b->resource->MockAddBufferedRange(5648, 6228);
- b->resource->MockAddBufferedRange(6803, 7383);
- b->resource->MockAddBufferedRange(7618, 8199);
- b->resource->MockAddBufferedRange(8199, 8779);
- b->resource->MockAddBufferedRange(8962, 9563);
- b->resource->MockAddBufferedRange(9734, 10314);
- b->resource->MockAddBufferedRange(10314, 10895);
- b->resource->MockAddBufferedRange(11207, 11787);
- b->resource->MockAddBufferedRange(12035, 12616);
- b->resource->MockAddBufferedRange(12616, 13196);
- b->resource->MockAddBufferedRange(13220, 13901);
-
- media::TimeIntervals ranges = b->reader->GetBuffered();
- EXPECT_EQ(2U, ranges.Length());
-
- EXPECT_NEAR(166.0 / 2500.0, ranges.Start(0).ToSeconds(), 0.000001);
- EXPECT_NEAR(332.0 / 2500.0, ranges.End(0).ToSeconds(), 0.000001);
-
- EXPECT_NEAR(581.0 / 2500.0, ranges.Start(1).ToSeconds(), 0.000001);
- EXPECT_NEAR(11255.0 / 44100.0, ranges.End(1).ToSeconds(), 0.000001);
-}
-
-TEST(MP4Reader, Normalised)
-{
- RefPtr<TestBinding> b = new TestBinding("mediasource_test.mp4");
- b->Init();
-
- // The first 5 video samples of this file are:
- // Video timescale=2500
- // Frame Start Size Time Duration Sync
- // 1 48 5455 166 83 Yes
- // 2 5503 145 249 83
- // 3 6228 575 581 83
- // 4 7383 235 415 83
- // 5 8779 183 332 83
- // 6 9543 191 498 83
- //
- // Audio timescale=44100
- // 1 5648 580 0 1024 Yes
- // 2 6803 580 1024 1058 Yes
- // 3 7618 581 2082 1014 Yes
- // 4 8199 580 3096 1015 Yes
- // 5 8962 581 4111 1014 Yes
- // 6 9734 580 5125 1014 Yes
- // 7 10314 581 6139 1059 Yes
- // 8 11207 580 7198 1014 Yes
- // 9 12035 581 8212 1014 Yes
- // 10 12616 580 9226 1015 Yes
- // 11 13220 581 10241 1014 Yes
-
- b->resource->MockClearBufferedRanges();
- b->resource->MockAddBufferedRange(48, 13901);
-
- media::TimeIntervals ranges = b->reader->GetBuffered();
- EXPECT_EQ(1U, ranges.Length());
-
- EXPECT_NEAR(166.0 / 2500.0, ranges.Start(0).ToSeconds(), 0.000001);
- EXPECT_NEAR(11255.0 / 44100.0, ranges.End(0).ToSeconds(), 0.000001);
-}
diff --git a/dom/media/gtest/TestRust.cpp b/dom/media/gtest/TestRust.cpp
deleted file mode 100644
index 86d0e99b8..000000000
--- a/dom/media/gtest/TestRust.cpp
+++ /dev/null
@@ -1,9 +0,0 @@
-#include <stdint.h>
-#include "gtest/gtest.h"
-
-extern "C" uint8_t* test_rust();
-
-TEST(rust, CallFromCpp) {
- auto greeting = test_rust();
- EXPECT_STREQ(reinterpret_cast<char*>(greeting), "hello from rust.");
-}
diff --git a/dom/media/gtest/hello.rs b/dom/media/gtest/hello.rs
deleted file mode 100644
index cd111882a..000000000
--- a/dom/media/gtest/hello.rs
+++ /dev/null
@@ -1,6 +0,0 @@
-#[no_mangle]
-pub extern fn test_rust() -> *const u8 {
- // NB: rust &str aren't null terminated.
- let greeting = "hello from rust.\0";
- greeting.as_ptr()
-}
diff --git a/dom/media/gtest/moz.build b/dom/media/gtest/moz.build
index d5d02bced..a7ea73807 100644
--- a/dom/media/gtest/moz.build
+++ b/dom/media/gtest/moz.build
@@ -21,7 +21,6 @@ UNIFIED_SOURCES += [
'TestMozPromise.cpp',
'TestMP3Demuxer.cpp',
'TestMP4Demuxer.cpp',
- # 'TestMP4Reader.cpp', disabled so we can turn check tests back on (bug 1175752)
'TestTrackEncoder.cpp',
'TestVideoSegment.cpp',
'TestVideoUtils.cpp',
diff --git a/dom/media/mediasource/ContainerParser.cpp b/dom/media/mediasource/ContainerParser.cpp
index 4ae37d7e9..b4dcfde8a 100644
--- a/dom/media/mediasource/ContainerParser.cpp
+++ b/dom/media/mediasource/ContainerParser.cpp
@@ -697,6 +697,9 @@ ContainerParser::CreateForMIMEType(const nsACString& aType)
if (aType.LowerCaseEqualsLiteral("video/webm") || aType.LowerCaseEqualsLiteral("audio/webm")) {
return new WebMContainerParser(aType);
}
+ if (aType.LowerCaseEqualsLiteral("video/x-matroska") || aType.LowerCaseEqualsLiteral("audio/x-matroska")) {
+ return new WebMContainerParser(aType);
+ }
#ifdef MOZ_FMP4
if (aType.LowerCaseEqualsLiteral("video/mp4") || aType.LowerCaseEqualsLiteral("audio/mp4")) {
diff --git a/dom/media/mediasource/MediaSource.cpp b/dom/media/mediasource/MediaSource.cpp
index af541bbbb..1c276cdc1 100644
--- a/dom/media/mediasource/MediaSource.cpp
+++ b/dom/media/mediasource/MediaSource.cpp
@@ -62,8 +62,6 @@ namespace mozilla {
// Returns true if we should enable MSE webm regardless of preferences.
// 1. If MP4/H264 isn't supported:
-// * Windows XP
-// * Windows Vista and Server 2008 without the optional "Platform Update Supplement"
// * N/KN editions (Europe and Korea) of Windows 7/8/8.1/10 without the
// optional "Windows Media Feature Pack"
// 2. If H264 hardware acceleration is not available.
@@ -112,14 +110,16 @@ MediaSource::IsTypeSupported(const nsAString& aType, DecoderDoctorDiagnostics* a
}
return NS_OK;
}
- if (mimeType.EqualsASCII("video/webm")) {
+ if (mimeType.EqualsASCII("video/webm") ||
+ mimeType.EqualsASCII("video/x-matroska")) {
if (!(Preferences::GetBool("media.mediasource.webm.enabled", false) ||
IsWebMForced(aDiagnostics))) {
return NS_ERROR_DOM_NOT_SUPPORTED_ERR;
}
return NS_OK;
}
- if (mimeType.EqualsASCII("audio/webm")) {
+ if (mimeType.EqualsASCII("audio/webm") ||
+ mimeType.EqualsASCII("audio/x-matroska")) {
if (!(Preferences::GetBool("media.mediasource.webm.enabled", false) ||
Preferences::GetBool("media.mediasource.webm.audio.enabled", true))) {
return NS_ERROR_DOM_NOT_SUPPORTED_ERR;
diff --git a/dom/media/mediasource/TrackBuffersManager.cpp b/dom/media/mediasource/TrackBuffersManager.cpp
index ac6d82411..21fb158b5 100644
--- a/dom/media/mediasource/TrackBuffersManager.cpp
+++ b/dom/media/mediasource/TrackBuffersManager.cpp
@@ -814,6 +814,8 @@ TrackBuffersManager::CreateDemuxerforMIMEType()
ShutdownDemuxers();
if (mType.LowerCaseEqualsLiteral("video/webm") ||
+ mType.LowerCaseEqualsLiteral("video/x-matroska") ||
+ mType.LowerCaseEqualsLiteral("audio/x-matroska") ||
mType.LowerCaseEqualsLiteral("audio/webm")) {
mInputDemuxer = new WebMDemuxer(mCurrentInputBuffer, true /* IsMediaSource*/ );
return;
diff --git a/dom/media/mediasource/moz.build b/dom/media/mediasource/moz.build
index 6ded1875d..a1689c216 100644
--- a/dom/media/mediasource/moz.build
+++ b/dom/media/mediasource/moz.build
@@ -38,9 +38,6 @@ TEST_DIRS += [
'gtest',
]
-if CONFIG['MOZ_GONK_MEDIACODEC']:
- DEFINES['MOZ_GONK_MEDIACODEC'] = True
-
include('/ipc/chromium/chromium-config.mozbuild')
FINAL_LIBRARY = 'xul'
diff --git a/dom/media/moz.build b/dom/media/moz.build
index 4d036a5f6..df8cb619d 100644
--- a/dom/media/moz.build
+++ b/dom/media/moz.build
@@ -30,6 +30,7 @@ DIRS += [
'ipc',
'mediasink',
'mediasource',
+ 'mp3',
'ogg',
'platforms',
'systemservices',
@@ -42,12 +43,6 @@ DIRS += [
'standalone',
]
-if CONFIG['MOZ_DIRECTSHOW']:
- DIRS += ['directshow']
-
-if CONFIG['MOZ_ANDROID_OMX']:
- DIRS += ['android']
-
if CONFIG['MOZ_FMP4']:
DIRS += ['fmp4']
@@ -128,9 +123,6 @@ EXPORTS += [
'MediaTimer.h',
'MediaTrack.h',
'MediaTrackList.h',
- 'MP3Decoder.h',
- 'MP3Demuxer.h',
- 'MP3FrameParser.h',
'NextFrameSeekTask.h',
'nsIDocumentActivity.h',
'PrincipalChangeObserver.h',
@@ -237,9 +229,6 @@ UNIFIED_SOURCES += [
'MediaTimer.cpp',
'MediaTrack.cpp',
'MediaTrackList.cpp',
- 'MP3Decoder.cpp',
- 'MP3Demuxer.cpp',
- 'MP3FrameParser.cpp',
'NextFrameSeekTask.cpp',
'QueueObject.cpp',
'SeekJob.cpp',
@@ -294,11 +283,6 @@ LOCAL_INCLUDES += [
'/netwerk/base',
]
-if CONFIG['MOZ_DIRECTSHOW']:
- LOCAL_INCLUDES += [
- '/media/webrtc/trunk/webrtc/modules/video_capture/windows',
- ]
-
if CONFIG['MOZ_WEBRTC']:
LOCAL_INCLUDES += [
'/media/webrtc/signaling/src/common',
diff --git a/dom/media/MP3Decoder.cpp b/dom/media/mp3/MP3Decoder.cpp
index b71111e79..074a0866d 100644
--- a/dom/media/MP3Decoder.cpp
+++ b/dom/media/mp3/MP3Decoder.cpp
@@ -24,7 +24,7 @@ MP3Decoder::Clone(MediaDecoderOwner* aOwner) {
MediaDecoderStateMachine*
MP3Decoder::CreateStateMachine() {
RefPtr<MediaDecoderReader> reader =
- new MediaFormatReader(this, new mp3::MP3Demuxer(GetResource()));
+ new MediaFormatReader(this, new MP3Demuxer(GetResource()));
return new MediaDecoderStateMachine(this, reader);
}
diff --git a/dom/media/MP3Decoder.h b/dom/media/mp3/MP3Decoder.h
index 887251065..887251065 100644
--- a/dom/media/MP3Decoder.h
+++ b/dom/media/mp3/MP3Decoder.h
diff --git a/dom/media/MP3Demuxer.cpp b/dom/media/mp3/MP3Demuxer.cpp
index 7d478a41b..5a98cabfe 100644
--- a/dom/media/MP3Demuxer.cpp
+++ b/dom/media/mp3/MP3Demuxer.cpp
@@ -33,7 +33,6 @@ using mozilla::media::TimeIntervals;
using mp4_demuxer::ByteReader;
namespace mozilla {
-namespace mp3 {
// MP3Demuxer
@@ -1338,5 +1337,4 @@ ID3Parser::ID3Header::Update(uint8_t c) {
return IsValid(mPos++);
}
-} // namespace mp3
} // namespace mozilla
diff --git a/dom/media/MP3Demuxer.h b/dom/media/mp3/MP3Demuxer.h
index 03e67b0d9..5331c4d54 100644
--- a/dom/media/MP3Demuxer.h
+++ b/dom/media/mp3/MP3Demuxer.h
@@ -13,7 +13,6 @@
#include <vector>
namespace mozilla {
-namespace mp3 {
class MP3TrackDemuxer;
@@ -468,7 +467,6 @@ private:
UniquePtr<AudioInfo> mInfo;
};
-} // namespace mp3
} // namespace mozilla
#endif
diff --git a/dom/media/mp3/moz.build b/dom/media/mp3/moz.build
new file mode 100644
index 000000000..596d061f8
--- /dev/null
+++ b/dom/media/mp3/moz.build
@@ -0,0 +1,17 @@
+# -*- Mode: python; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+EXPORTS += [
+ 'MP3Decoder.h',
+ 'MP3Demuxer.h',
+]
+
+UNIFIED_SOURCES += [
+ 'MP3Decoder.cpp',
+ 'MP3Demuxer.cpp',
+]
+
+FINAL_LIBRARY = 'xul'
diff --git a/dom/media/ogg/OggDemuxer.cpp b/dom/media/ogg/OggDemuxer.cpp
index 591a5248f..0cc484687 100644
--- a/dom/media/ogg/OggDemuxer.cpp
+++ b/dom/media/ogg/OggDemuxer.cpp
@@ -12,7 +12,6 @@
#include "mozilla/Atomics.h"
#include "mozilla/PodOperations.h"
#include "mozilla/SharedThreadPool.h"
-#include "mozilla/Telemetry.h"
#include "mozilla/TimeStamp.h"
#include "MediaDataDemuxer.h"
#include "nsAutoRef.h"
@@ -164,7 +163,6 @@ OggDemuxer::~OggDemuxer()
MOZ_LOG(gMediaDemuxerLog, mozilla::LogLevel::Debug,
("OggDemuxer(%p)::%s: Reporting telemetry MEDIA_OGG_LOADED_IS_CHAINED=%d",
ptr, __func__, isChained));
- Telemetry::Accumulate(Telemetry::ID::MEDIA_OGG_LOADED_IS_CHAINED, isChained);
});
AbstractThread::MainThread()->Dispatch(task.forget());
}
diff --git a/dom/media/platforms/MediaTelemetryConstants.h b/dom/media/platforms/MediaTelemetryConstants.h
deleted file mode 100644
index 5024949a8..000000000
--- a/dom/media/platforms/MediaTelemetryConstants.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* -*- Mode: IDL; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-
- *
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-#ifndef dom_media_platforms_MediaTelemetryConstants_h___
-#define dom_media_platforms_MediaTelemetryConstants_h___
-
-namespace mozilla {
-namespace media {
-
-enum class MediaDecoderBackend : uint32_t
-{
- WMFSoftware = 0,
- WMFDXVA2D3D9 = 1,
- WMFDXVA2D3D11 = 2
-};
-
-} // namespace media
-} // namespace mozilla
-
-#endif // dom_media_platforms_MediaTelemetryConstants_h___
diff --git a/dom/media/platforms/PDMFactory.cpp b/dom/media/platforms/PDMFactory.cpp
index c1e58fdc2..5bfdcffb7 100644
--- a/dom/media/platforms/PDMFactory.cpp
+++ b/dom/media/platforms/PDMFactory.cpp
@@ -19,9 +19,6 @@
#ifdef MOZ_APPLEMEDIA
#include "AppleDecoderModule.h"
#endif
-#ifdef MOZ_GONK_MEDIACODEC
-#include "GonkDecoderModule.h"
-#endif
#ifdef MOZ_WIDGET_ANDROID
#include "AndroidDecoderModule.h"
#endif
@@ -390,12 +387,6 @@ PDMFactory::CreatePDMs()
m = new AppleDecoderModule();
StartupPDM(m);
#endif
-#ifdef MOZ_GONK_MEDIACODEC
- if (MediaPrefs::PDMGonkDecoderEnabled()) {
- m = new GonkDecoderModule();
- StartupPDM(m);
- }
-#endif
#ifdef MOZ_WIDGET_ANDROID
if(MediaPrefs::PDMAndroidMediaCodecEnabled()){
m = new AndroidDecoderModule();
diff --git a/dom/media/platforms/agnostic/AOMDecoder.cpp b/dom/media/platforms/agnostic/AOMDecoder.cpp
new file mode 100644
index 000000000..b5d21375e
--- /dev/null
+++ b/dom/media/platforms/agnostic/AOMDecoder.cpp
@@ -0,0 +1,332 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "AOMDecoder.h"
+#include "MediaResult.h"
+#include "TimeUnits.h"
+#include "aom/aomdx.h"
+#include "aom/aom_image.h"
+#include "gfx2DGlue.h"
+#include "mozilla/PodOperations.h"
+#include "mozilla/SyncRunnable.h"
+#include "nsError.h"
+#include "prsystem.h"
+
+#include <algorithm>
+
+#undef LOG
+#define LOG(arg, ...) MOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, ("AOMDecoder(%p)::%s: " arg, this, __func__, ##__VA_ARGS__))
+
+namespace mozilla {
+
+using namespace gfx;
+using namespace layers;
+
+AOMDecoder::AOMDecoder(const CreateDecoderParams& aParams)
+ : mImageContainer(aParams.mImageContainer)
+ , mTaskQueue(aParams.mTaskQueue)
+ , mCallback(aParams.mCallback)
+ , mIsFlushing(false)
+ , mInfo(aParams.VideoConfig())
+{
+ PodZero(&mCodec);
+}
+
+AOMDecoder::~AOMDecoder()
+{
+}
+
+void
+AOMDecoder::Shutdown()
+{
+ aom_codec_destroy(&mCodec);
+}
+
+RefPtr<MediaDataDecoder::InitPromise>
+AOMDecoder::Init()
+{
+ int decode_threads = 2;
+
+ aom_codec_iface_t* dx = aom_codec_av1_dx();
+ if (mInfo.mDisplay.width >= 2048) {
+ decode_threads = 8;
+ }
+ else if (mInfo.mDisplay.width >= 1024) {
+ decode_threads = 4;
+ }
+ decode_threads = std::min(decode_threads, PR_GetNumberOfProcessors());
+
+ aom_codec_dec_cfg_t config;
+ PodZero(&config);
+ config.threads = decode_threads;
+ config.w = config.h = 0; // set after decode
+ config.allow_lowbitdepth = true;
+
+ aom_codec_flags_t flags = 0;
+
+ if (!dx || aom_codec_dec_init(&mCodec, dx, &config, flags)) {
+ return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ }
+ return InitPromise::CreateAndResolve(TrackInfo::kVideoTrack, __func__);
+}
+
+void
+AOMDecoder::Flush()
+{
+ MOZ_ASSERT(mCallback->OnReaderTaskQueue());
+ mIsFlushing = true;
+ nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction([this] () {
+ // nothing to do for now.
+ });
+ SyncRunnable::DispatchToThread(mTaskQueue, r);
+ mIsFlushing = false;
+}
+
+// Ported from third_party/aom/tools_common.c.
+static aom_codec_err_t
+highbd_img_downshift(aom_image_t *dst, aom_image_t *src, int down_shift) {
+ int plane;
+ if (dst->d_w != src->d_w || dst->d_h != src->d_h)
+ return AOM_CODEC_INVALID_PARAM;
+ if (dst->x_chroma_shift != src->x_chroma_shift)
+ return AOM_CODEC_INVALID_PARAM;
+ if (dst->y_chroma_shift != src->y_chroma_shift)
+ return AOM_CODEC_INVALID_PARAM;
+ if (dst->fmt != (src->fmt & ~AOM_IMG_FMT_HIGHBITDEPTH))
+ return AOM_CODEC_INVALID_PARAM;
+ if (down_shift < 0)
+ return AOM_CODEC_INVALID_PARAM;
+ switch (dst->fmt) {
+ case AOM_IMG_FMT_I420:
+ case AOM_IMG_FMT_I422:
+ case AOM_IMG_FMT_I444:
+ break;
+ default:
+ return AOM_CODEC_INVALID_PARAM;
+ }
+ switch (src->fmt) {
+ case AOM_IMG_FMT_I42016:
+ case AOM_IMG_FMT_I42216:
+ case AOM_IMG_FMT_I44416:
+ break;
+ default:
+ // We don't support anything that's not 16 bit
+ return AOM_CODEC_UNSUP_BITSTREAM;
+ }
+ for (plane = 0; plane < 3; plane++) {
+ int w = src->d_w;
+ int h = src->d_h;
+ int x, y;
+ if (plane) {
+ w = (w + src->x_chroma_shift) >> src->x_chroma_shift;
+ h = (h + src->y_chroma_shift) >> src->y_chroma_shift;
+ }
+ for (y = 0; y < h; y++) {
+ uint16_t *p_src =
+ (uint16_t *)(src->planes[plane] + y * src->stride[plane]);
+ uint8_t *p_dst =
+ dst->planes[plane] + y * dst->stride[plane];
+ for (x = 0; x < w; x++) *p_dst++ = (*p_src++ >> down_shift) & 0xFF;
+ }
+ }
+ return AOM_CODEC_OK;
+}
+
+// UniquePtr dtor wrapper for aom_image_t.
+struct AomImageFree {
+ void operator()(aom_image_t* img) { aom_img_free(img); }
+};
+
+MediaResult
+AOMDecoder::DoDecode(MediaRawData* aSample)
+{
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+
+#if defined(DEBUG)
+ NS_ASSERTION(IsKeyframe(*aSample) == aSample->mKeyframe,
+ "AOM Decode Keyframe error sample->mKeyframe and si.si_kf out of sync");
+#endif
+
+ if (aom_codec_err_t r = aom_codec_decode(&mCodec, aSample->Data(), aSample->Size(), nullptr)) {
+ LOG("AOM Decode error: %s", aom_codec_err_to_string(r));
+ return MediaResult(
+ NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("AOM error decoding AV1 sample: %s", aom_codec_err_to_string(r)));
+ }
+
+ aom_codec_iter_t iter = nullptr;
+ aom_image_t *img;
+ UniquePtr<aom_image_t, AomImageFree> img8;
+
+ while ((img = aom_codec_get_frame(&mCodec, &iter))) {
+ // Track whether the underlying buffer is 8 or 16 bits per channel.
+ bool highbd = bool(img->fmt & AOM_IMG_FMT_HIGHBITDEPTH);
+ if (highbd) {
+ // Downsample images with more than 8 bits per channel.
+ aom_img_fmt_t fmt8 = static_cast<aom_img_fmt_t>(img->fmt ^ AOM_IMG_FMT_HIGHBITDEPTH);
+ img8.reset(aom_img_alloc(NULL, fmt8, img->d_w, img->d_h, 16));
+ if (img8 == nullptr) {
+ LOG("Couldn't allocate bitdepth reduction target!");
+ return MediaResult(
+ NS_ERROR_OUT_OF_MEMORY,
+ RESULT_DETAIL("Couldn't allocate conversion buffer for AV1 frame"));
+ }
+ if (aom_codec_err_t r = highbd_img_downshift(img8.get(), img, img->bit_depth - 8)) {
+ return MediaResult(
+ NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("Error converting AV1 frame to 8 bits: %s",
+ aom_codec_err_to_string(r)));
+ }
+ // img normally points to storage owned by mCodec, so it is not freed.
+ // To copy out the contents of img8 we can overwrite img with an alias.
+ // Since img is assigned at the start of the while loop and img8 is held
+ // outside that loop, the alias won't outlive the storage it points to.
+ img = img8.get();
+ highbd = false;
+ }
+
+ NS_ASSERTION(img->fmt == AOM_IMG_FMT_I420 ||
+ img->fmt == AOM_IMG_FMT_I42016 ||
+ img->fmt == AOM_IMG_FMT_I444 ||
+ img->fmt == AOM_IMG_FMT_I44416,
+ "AV1 image format not I420 or I444");
+
+ // Chroma shifts are rounded down as per the decoding examples in the SDK
+ VideoData::YCbCrBuffer b;
+ b.mPlanes[0].mData = img->planes[0];
+ b.mPlanes[0].mStride = img->stride[0];
+ b.mPlanes[0].mHeight = img->d_h;
+ b.mPlanes[0].mWidth = img->d_w;
+ b.mPlanes[0].mOffset = 0;
+ b.mPlanes[0].mSkip = highbd ? 1 : 0;
+
+ b.mPlanes[1].mData = img->planes[1];
+ b.mPlanes[1].mStride = img->stride[1];
+ b.mPlanes[1].mOffset = 0;
+ b.mPlanes[1].mSkip = highbd ? 1 : 0;
+
+ b.mPlanes[2].mData = img->planes[2];
+ b.mPlanes[2].mStride = img->stride[2];
+ b.mPlanes[2].mOffset = 0;
+ b.mPlanes[2].mSkip = highbd ? 1 : 0;
+
+ if (img->fmt == AOM_IMG_FMT_I420 ||
+ img->fmt == AOM_IMG_FMT_I42016) {
+ b.mPlanes[1].mHeight = (img->d_h + 1) >> img->y_chroma_shift;
+ b.mPlanes[1].mWidth = (img->d_w + 1) >> img->x_chroma_shift;
+
+ b.mPlanes[2].mHeight = (img->d_h + 1) >> img->y_chroma_shift;
+ b.mPlanes[2].mWidth = (img->d_w + 1) >> img->x_chroma_shift;
+ } else if (img->fmt == AOM_IMG_FMT_I444) {
+ b.mPlanes[1].mHeight = img->d_h;
+ b.mPlanes[1].mWidth = img->d_w;
+
+ b.mPlanes[2].mHeight = img->d_h;
+ b.mPlanes[2].mWidth = img->d_w;
+ } else {
+ LOG("AOM Unknown image format");
+ return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("AOM Unknown image format"));
+ }
+
+ RefPtr<VideoData> v =
+ VideoData::CreateAndCopyData(mInfo,
+ mImageContainer,
+ aSample->mOffset,
+ aSample->mTime,
+ aSample->mDuration,
+ b,
+ aSample->mKeyframe,
+ aSample->mTimecode,
+ mInfo.ScaledImageRect(img->d_w,
+ img->d_h));
+
+ if (!v) {
+ LOG("Image allocation error source %ux%u display %ux%u picture %ux%u",
+ img->d_w, img->d_h, mInfo.mDisplay.width, mInfo.mDisplay.height,
+ mInfo.mImage.width, mInfo.mImage.height);
+ return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
+ }
+ mCallback->Output(v);
+ }
+ return NS_OK;
+}
+
+void
+AOMDecoder::ProcessDecode(MediaRawData* aSample)
+{
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+ if (mIsFlushing) {
+ return;
+ }
+ MediaResult rv = DoDecode(aSample);
+ if (NS_FAILED(rv)) {
+ mCallback->Error(rv);
+ } else {
+ mCallback->InputExhausted();
+ }
+}
+
+void
+AOMDecoder::Input(MediaRawData* aSample)
+{
+ MOZ_ASSERT(mCallback->OnReaderTaskQueue());
+ mTaskQueue->Dispatch(NewRunnableMethod<RefPtr<MediaRawData>>(
+ this, &AOMDecoder::ProcessDecode, aSample));
+}
+
+void
+AOMDecoder::ProcessDrain()
+{
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+ mCallback->DrainComplete();
+}
+
+void
+AOMDecoder::Drain()
+{
+ MOZ_ASSERT(mCallback->OnReaderTaskQueue());
+ mTaskQueue->Dispatch(NewRunnableMethod(this, &AOMDecoder::ProcessDrain));
+}
+
+/* static */
+bool
+AOMDecoder::IsAV1(const nsACString& aMimeType)
+{
+ return aMimeType.EqualsLiteral("video/webm; codecs=av1") ||
+ aMimeType.EqualsLiteral("video/av1");
+}
+
+/* static */
+bool
+AOMDecoder::IsKeyframe(Span<const uint8_t> aBuffer) {
+ aom_codec_stream_info_t info;
+ PodZero(&info);
+
+ aom_codec_peek_stream_info(aom_codec_av1_dx(),
+ aBuffer.Elements(),
+ aBuffer.Length(),
+ &info);
+
+ return bool(info.is_kf);
+}
+
+/* static */
+nsIntSize
+AOMDecoder::GetFrameSize(Span<const uint8_t> aBuffer) {
+ aom_codec_stream_info_t info;
+ PodZero(&info);
+
+ aom_codec_peek_stream_info(aom_codec_av1_dx(),
+ aBuffer.Elements(),
+ aBuffer.Length(),
+ &info);
+
+ return nsIntSize(info.w, info.h);
+}
+
+} // namespace mozilla
+#undef LOG
diff --git a/dom/media/platforms/agnostic/AOMDecoder.h b/dom/media/platforms/agnostic/AOMDecoder.h
new file mode 100644
index 000000000..ec6b1f95a
--- /dev/null
+++ b/dom/media/platforms/agnostic/AOMDecoder.h
@@ -0,0 +1,62 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(AOMDecoder_h_)
+#define AOMDecoder_h_
+
+#include "PlatformDecoderModule.h"
+#include "mozilla/Span.h"
+
+#include <stdint.h>
+#include "aom/aom_decoder.h"
+
+namespace mozilla {
+
+class AOMDecoder : public MediaDataDecoder
+{
+public:
+ explicit AOMDecoder(const CreateDecoderParams& aParams);
+
+ RefPtr<InitPromise> Init() override;
+ void Input(MediaRawData* aSample) override;
+ void Flush() override;
+ void Drain() override;
+ void Shutdown() override;
+ const char* GetDescriptionName() const override
+ {
+ return "libaom (AV1) video decoder";
+ }
+
+ // Return true if aMimeType is one of the strings used
+ // by our demuxers to identify AV1 streams.
+ static bool IsAV1(const nsACString& aMimeType);
+
+ // Return true if a sample is a keyframe.
+ static bool IsKeyframe(Span<const uint8_t> aBuffer);
+
+ // Return the frame dimensions for a sample.
+ static nsIntSize GetFrameSize(Span<const uint8_t> aBuffer);
+
+private:
+ ~AOMDecoder();
+ void ProcessDecode(MediaRawData* aSample);
+ MediaResult DoDecode(MediaRawData* aSample);
+ void ProcessDrain();
+
+ const RefPtr<layers::ImageContainer> mImageContainer;
+ const RefPtr<TaskQueue> mTaskQueue;
+ MediaDataDecoderCallback* mCallback;
+ Atomic<bool> mIsFlushing;
+
+ // AOM decoder state
+ aom_codec_ctx_t mCodec;
+
+ const VideoInfo& mInfo;
+};
+
+} // namespace mozilla
+
+#endif // AOMDecoder_h_
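
The static helpers mirror the VPXDecoder ones and let a demuxer probe AV1 data without instantiating a decoder. A hypothetical call site; the MIME string and raw sample bytes would come from the container parser:

#include "AOMDecoder.h"

using namespace mozilla;

// Hypothetical demuxer-side helper.
static bool ProbeAv1Keyframe(const nsACString& aMimeType,
                             Span<const uint8_t> aSample,
                             nsIntSize& aOutFrameSize)
{
  if (!AOMDecoder::IsAV1(aMimeType)) {
    return false;
  }
  aOutFrameSize = AOMDecoder::GetFrameSize(aSample);
  return AOMDecoder::IsKeyframe(aSample);
}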
diff --git a/dom/media/platforms/agnostic/AgnosticDecoderModule.cpp b/dom/media/platforms/agnostic/AgnosticDecoderModule.cpp
index 7bd75b7fe..cf5ed3d22 100644
--- a/dom/media/platforms/agnostic/AgnosticDecoderModule.cpp
+++ b/dom/media/platforms/agnostic/AgnosticDecoderModule.cpp
@@ -5,6 +5,7 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "AgnosticDecoderModule.h"
+#include "MediaPrefs.h"
#include "mozilla/Logging.h"
#include "OpusDecoder.h"
#include "VorbisDecoder.h"
@@ -12,6 +13,10 @@
#include "WAVDecoder.h"
#include "TheoraDecoder.h"
+#ifdef MOZ_AV1
+#include "AOMDecoder.h"
+#endif
+
namespace mozilla {
bool
@@ -24,6 +29,11 @@ AgnosticDecoderModule::SupportsMimeType(const nsACString& aMimeType,
VorbisDataDecoder::IsVorbis(aMimeType) ||
WaveDataDecoder::IsWave(aMimeType) ||
TheoraDecoder::IsTheora(aMimeType);
+#ifdef MOZ_AV1
+ if (MediaPrefs::AV1Enabled()) {
+ supports |= AOMDecoder::IsAV1(aMimeType);
+ }
+#endif
MOZ_LOG(sPDMLog, LogLevel::Debug, ("Agnostic decoder %s requested type",
supports ? "supports" : "rejects"));
return supports;
@@ -36,7 +46,14 @@ AgnosticDecoderModule::CreateVideoDecoder(const CreateDecoderParams& aParams)
if (VPXDecoder::IsVPX(aParams.mConfig.mMimeType)) {
m = new VPXDecoder(aParams);
- } else if (TheoraDecoder::IsTheora(aParams.mConfig.mMimeType)) {
+ }
+#ifdef MOZ_AV1
+ else if (AOMDecoder::IsAV1(aParams.mConfig.mMimeType) &&
+ MediaPrefs::AV1Enabled()) {
+ m = new AOMDecoder(aParams);
+ }
+#endif
+ else if (TheoraDecoder::IsTheora(aParams.mConfig.mMimeType)) {
m = new TheoraDecoder(aParams);
}
diff --git a/dom/media/platforms/agnostic/VPXDecoder.cpp b/dom/media/platforms/agnostic/VPXDecoder.cpp
index 77c81b51b..f2f84487f 100644
--- a/dom/media/platforms/agnostic/VPXDecoder.cpp
+++ b/dom/media/platforms/agnostic/VPXDecoder.cpp
@@ -22,7 +22,7 @@ namespace mozilla {
using namespace gfx;
using namespace layers;
-static int MimeTypeToCodec(const nsACString& aMimeType)
+static VPXDecoder::Codec MimeTypeToCodec(const nsACString& aMimeType)
{
if (aMimeType.EqualsLiteral("video/webm; codecs=vp8")) {
return VPXDecoder::Codec::VP8;
@@ -31,7 +31,7 @@ static int MimeTypeToCodec(const nsACString& aMimeType)
} else if (aMimeType.EqualsLiteral("video/vp9")) {
return VPXDecoder::Codec::VP9;
}
- return -1;
+ return VPXDecoder::Codec::Unknown;
}
VPXDecoder::VPXDecoder(const CreateDecoderParams& aParams)
@@ -101,17 +101,10 @@ MediaResult
VPXDecoder::DoDecode(MediaRawData* aSample)
{
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+
#if defined(DEBUG)
- vpx_codec_stream_info_t si;
- PodZero(&si);
- si.sz = sizeof(si);
- if (mCodec == Codec::VP8) {
- vpx_codec_peek_stream_info(vpx_codec_vp8_dx(), aSample->Data(), aSample->Size(), &si);
- } else if (mCodec == Codec::VP9) {
- vpx_codec_peek_stream_info(vpx_codec_vp9_dx(), aSample->Data(), aSample->Size(), &si);
- }
- NS_ASSERTION(bool(si.is_kf) == aSample->mKeyframe,
- "VPX Decode Keyframe error sample->mKeyframe and si.si_kf out of sync");
+ NS_ASSERTION(IsKeyframe(*aSample, mCodec) == aSample->mKeyframe,
+ "VPX Decode Keyframe error sample->mKeyframe and sample data out of sync");
#endif
if (vpx_codec_err_t r = vpx_codec_decode(&mVPX, aSample->Data(), aSample->Size(), nullptr, 0)) {
@@ -249,5 +242,41 @@ VPXDecoder::IsVP9(const nsACString& aMimeType)
return IsVPX(aMimeType, VPXDecoder::VP9);
}
+/* static */
+bool
+VPXDecoder::IsKeyframe(Span<const uint8_t> aBuffer, Codec aCodec)
+{
+ vpx_codec_stream_info_t si;
+ PodZero(&si);
+ si.sz = sizeof(si);
+
+ if (aCodec == Codec::VP8) {
+ vpx_codec_peek_stream_info(vpx_codec_vp8_dx(), aBuffer.Elements(), aBuffer.Length(), &si);
+ return bool(si.is_kf);
+ } else if (aCodec == Codec::VP9) {
+ vpx_codec_peek_stream_info(vpx_codec_vp9_dx(), aBuffer.Elements(), aBuffer.Length(), &si);
+ return bool(si.is_kf);
+ }
+
+ return false;
+}
+
+/* static */
+nsIntSize
+VPXDecoder::GetFrameSize(Span<const uint8_t> aBuffer, Codec aCodec)
+{
+ vpx_codec_stream_info_t si;
+ PodZero(&si);
+ si.sz = sizeof(si);
+
+ if (aCodec == Codec::VP8) {
+ vpx_codec_peek_stream_info(vpx_codec_vp8_dx(), aBuffer.Elements(), aBuffer.Length(), &si);
+ } else if (aCodec == Codec::VP9) {
+ vpx_codec_peek_stream_info(vpx_codec_vp9_dx(), aBuffer.Elements(), aBuffer.Length(), &si);
+ }
+
+ return nsIntSize(si.w, si.h);
+}
+
} // namespace mozilla
#undef LOG
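
The new VPXDecoder::IsKeyframe()/GetFrameSize() statics expose the vpx_codec_peek_stream_info() path that DoDecode() previously inlined. A hypothetical standalone caller for VP9 data:

#include "VPXDecoder.h"

using namespace mozilla;

// Hypothetical caller: peek at a raw VP9 sample without creating a decoder
// instance. |aData|/|aSize| would come straight from the demuxer.
static bool IsVp9Keyframe(const uint8_t* aData, size_t aSize,
                          nsIntSize& aOutFrameSize)
{
  Span<const uint8_t> sample(aData, aSize);
  aOutFrameSize = VPXDecoder::GetFrameSize(sample, VPXDecoder::Codec::VP9);
  return VPXDecoder::IsKeyframe(sample, VPXDecoder::Codec::VP9);
}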
diff --git a/dom/media/platforms/agnostic/VPXDecoder.h b/dom/media/platforms/agnostic/VPXDecoder.h
index d420ec069..4e8d83617 100644
--- a/dom/media/platforms/agnostic/VPXDecoder.h
+++ b/dom/media/platforms/agnostic/VPXDecoder.h
@@ -7,6 +7,7 @@
#define VPXDecoder_h_
#include "PlatformDecoderModule.h"
+#include "mozilla/Span.h"
#include <stdint.h>
#define VPX_DONT_DEFINE_STDINT_TYPES
@@ -36,7 +37,8 @@ public:
enum Codec: uint8_t {
VP8 = 1 << 0,
- VP9 = 1 << 1
+ VP9 = 1 << 1,
+ Unknown = 1 << 7,
};
// Return true if aMimeType is one of the strings used by our demuxers to
@@ -46,6 +48,12 @@ public:
static bool IsVP8(const nsACString& aMimeType);
static bool IsVP9(const nsACString& aMimeType);
+ // Return true if a sample is a keyframe for the specified codec.
+ static bool IsKeyframe(Span<const uint8_t> aBuffer, Codec aCodec);
+
+ // Return the frame dimensions for a sample for the specified codec.
+ static nsIntSize GetFrameSize(Span<const uint8_t> aBuffer, Codec aCodec);
+
private:
void ProcessDecode(MediaRawData* aSample);
MediaResult DoDecode(MediaRawData* aSample);
@@ -61,7 +69,7 @@ private:
const VideoInfo& mInfo;
- const int mCodec;
+ const Codec mCodec;
};
} // namespace mozilla
diff --git a/dom/media/platforms/ffmpeg/FFmpegDataDecoder.cpp b/dom/media/platforms/ffmpeg/FFmpegDataDecoder.cpp
index 8cb5c8578..8655ce25f 100644
--- a/dom/media/platforms/ffmpeg/FFmpegDataDecoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegDataDecoder.cpp
@@ -69,12 +69,21 @@ FFmpegDataDecoder<LIBAV_VER>::InitDecoder()
mCodecContext->extradata_size = mExtraData->Length();
// FFmpeg may use SIMD instructions to access the data, which read the
// data in 32-byte blocks. Must ensure we have enough data to read.
+ uint32_t padding_size =
#if LIBAVCODEC_VERSION_MAJOR >= 58
- mExtraData->AppendElements(AV_INPUT_BUFFER_PADDING_SIZE);
+ AV_INPUT_BUFFER_PADDING_SIZE;
#else
- mExtraData->AppendElements(FF_INPUT_BUFFER_PADDING_SIZE);
+ FF_INPUT_BUFFER_PADDING_SIZE;
#endif
- mCodecContext->extradata = mExtraData->Elements();
+ mCodecContext->extradata = static_cast<uint8_t*>(
+ mLib->av_malloc(mExtraData->Length() + padding_size));
+ if (!mCodecContext->extradata) {
+ return MediaResult(NS_ERROR_OUT_OF_MEMORY,
+ RESULT_DETAIL("Couldn't init ffmpeg extradata"));
+ }
+ memcpy(mCodecContext->extradata,
+ mExtraData->Elements(),
+ mExtraData->Length());
} else {
mCodecContext->extradata_size = 0;
}
@@ -165,6 +174,9 @@ FFmpegDataDecoder<LIBAV_VER>::ProcessShutdown()
StaticMutexAutoLock mon(sMonitor);
if (mCodecContext) {
+ if (mCodecContext->extradata) {
+ mLib->av_freep(&mCodecContext->extradata);
+ }
mLib->avcodec_close(mCodecContext);
mLib->av_freep(&mCodecContext);
#if LIBAVCODEC_VERSION_MAJOR >= 55
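The change above stops pointing mCodecContext->extradata into mExtraData and instead gives the codec context its own av_malloc'd, padded copy, released with av_freep() on shutdown. A rough standalone equivalent, assuming a directly linked FFmpeg new enough to define AV_INPUT_BUFFER_PADDING_SIZE (the tree branches on the libavcodec version, as shown):

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/mem.h>
}
#include <cstring>

// Give the codec context its own padded copy of the codec-specific data.
// FFmpeg requires extradata to come from the av_malloc() family and to be
// over-allocated by the input padding size, with the padding zeroed,
// because SIMD reads may run past the logical end of the buffer.
static bool
SetCodecExtraData(AVCodecContext* aCtx, const uint8_t* aData, int aSize)
{
  aCtx->extradata = static_cast<uint8_t*>(
    av_mallocz(aSize + AV_INPUT_BUFFER_PADDING_SIZE));
  if (!aCtx->extradata) {
    return false;  // out of memory
  }
  memcpy(aCtx->extradata, aData, aSize);
  aCtx->extradata_size = aSize;
  return true;
}

// Teardown mirrors ProcessShutdown() above: av_freep(&ctx->extradata) before
// closing/freeing the codec context, so FFmpeg never frees memory the caller
// allocated with a different allocator, and vice versa.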
diff --git a/dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp b/dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp
index 6302882a6..426e9f74b 100644
--- a/dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp
@@ -7,6 +7,7 @@
#include "MediaPrefs.h"
#include "mozilla/PodOperations.h"
#include "mozilla/Types.h"
+#include "PlatformDecoderModule.h"
#include "prlink.h"
#define AV_LOG_DEBUG 48
@@ -144,6 +145,8 @@ FFmpegLibWrapper::Link()
AV_FUNC(avcodec_alloc_frame, (AV_FUNC_53 | AV_FUNC_54))
AV_FUNC(avcodec_get_frame_defaults, (AV_FUNC_53 | AV_FUNC_54))
AV_FUNC(avcodec_free_frame, AV_FUNC_54)
+ AV_FUNC(avcodec_send_packet, AV_FUNC_58)
+ AV_FUNC(avcodec_receive_frame, AV_FUNC_58)
AV_FUNC(av_log_set_level, AV_FUNC_AVUTIL_ALL)
AV_FUNC(av_malloc, AV_FUNC_AVUTIL_ALL)
AV_FUNC(av_freep, AV_FUNC_AVUTIL_ALL)
diff --git a/dom/media/platforms/ffmpeg/FFmpegLibWrapper.h b/dom/media/platforms/ffmpeg/FFmpegLibWrapper.h
index d6944a1d8..b968edd32 100644
--- a/dom/media/platforms/ffmpeg/FFmpegLibWrapper.h
+++ b/dom/media/platforms/ffmpeg/FFmpegLibWrapper.h
@@ -5,6 +5,7 @@
#ifndef __FFmpegLibWrapper_h__
#define __FFmpegLibWrapper_h__
+#include "mozilla/Attributes.h"
#include "mozilla/Types.h"
struct AVCodec;
@@ -70,6 +71,10 @@ struct FFmpegLibWrapper
// libavcodec v54 only
void (*avcodec_free_frame)(AVFrame** frame);
+ // libavcodec v58 and later only
+ int (*avcodec_send_packet)(AVCodecContext* avctx, const AVPacket* avpkt);
+ int (*avcodec_receive_frame)(AVCodecContext* avctx, AVFrame* frame);
+
// libavutil
void (*av_log_set_level)(int level);
void* (*av_malloc)(size_t size);
@@ -91,4 +96,4 @@ private:
} // namespace mozilla
-#endif // FFmpegLibWrapper
\ No newline at end of file
+#endif // FFmpegLibWrapper
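FFmpegLibWrapper resolves libavcodec entry points at runtime (via NSPR's prlink, included in the .cpp above), so the two new members are just additional symbols that may or may not exist in the loaded library. A hedged sketch of the same pattern using plain dlopen/dlsym instead of prlink, with names chosen purely for illustration:

#include <dlfcn.h>

struct AVCodecContext;  // opaque here; real definitions live in libavcodec
struct AVPacket;
struct AVFrame;

struct AvcodecFns {
  int (*send_packet)(AVCodecContext*, const AVPacket*);
  int (*receive_frame)(AVCodecContext*, AVFrame*);
};

// Load libavcodec and look up the decode-API symbols that only exist in
// version 58 and later. A null pointer afterwards simply means the caller
// must fall back to the older decode API (e.g. avcodec_decode_video2()).
static bool
LinkAvcodec(const char* aLibName, AvcodecFns& aOut)
{
  void* handle = dlopen(aLibName, RTLD_NOW | RTLD_LOCAL);
  if (!handle) {
    return false;
  }
  aOut.send_packet = reinterpret_cast<int (*)(AVCodecContext*, const AVPacket*)>(
    dlsym(handle, "avcodec_send_packet"));
  aOut.receive_frame = reinterpret_cast<int (*)(AVCodecContext*, AVFrame*)>(
    dlsym(handle, "avcodec_receive_frame"));
  return true;
}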
diff --git a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
index aec1e9136..f3101e44c 100644
--- a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
@@ -174,12 +174,15 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample, bool* aGotFrame)
uint8_t* inputData = const_cast<uint8_t*>(aSample->Data());
size_t inputSize = aSample->Size();
-#if LIBAVCODEC_VERSION_MAJOR >= 54
+#if LIBAVCODEC_VERSION_MAJOR >= 54 && LIBAVCODEC_VERSION_MAJOR < 58
if (inputSize && mCodecParser && (mCodecID == AV_CODEC_ID_VP8
-#if LIBAVCODEC_VERSION_MAJOR >= 55
+#if LIBAVCODEC_VERSION_MAJOR >= 55 && LIBAVCODEC_VERSION_MAJOR < 58
|| mCodecID == AV_CODEC_ID_VP9
#endif
- )) {
+ ))
+#endif
+#if LIBAVCODEC_VERSION_MAJOR >= 54
+ {
while (inputSize) {
uint8_t* data = inputData;
int size = inputSize;
@@ -224,6 +227,48 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample,
packet.flags = aSample->mKeyframe ? AV_PKT_FLAG_KEY : 0;
packet.pos = aSample->mOffset;
+#if LIBAVCODEC_VERSION_MAJOR >= 58
+ packet.duration = aSample->mDuration;
+ int res = mLib->avcodec_send_packet(mCodecContext, &packet);
+ if (res < 0) {
+ // In theory, avcodec_send_packet could return -EAGAIN should its internal
+ // buffers be full. In practice this can't happen as we only feed one frame
+ // at a time, and we immediately call avcodec_receive_frame right after.
+ FFMPEG_LOG("avcodec_send_packet error: %d", res);
+ return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("avcodec_send_packet error: %d", res));
+ }
+
+ if (aGotFrame) {
+ *aGotFrame = false;
+ }
+ do {
+ if (!PrepareFrame()) {
+ NS_WARNING("FFmpeg h264 decoder failed to allocate frame.");
+ return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
+ }
+ res = mLib->avcodec_receive_frame(mCodecContext, mFrame);
+ if (res == int(AVERROR_EOF)) {
+ return NS_ERROR_DOM_MEDIA_END_OF_STREAM;
+ }
+ if (res == AVERROR(EAGAIN)) {
+ return NS_OK;
+ }
+ if (res < 0) {
+ FFMPEG_LOG("avcodec_receive_frame error: %d", res);
+ return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("avcodec_receive_frame error: %d", res));
+ }
+ MediaResult rv = CreateImage(mFrame->pkt_pos, mFrame->pkt_pts,
+ mFrame->pkt_duration);
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+ if (aGotFrame) {
+ *aGotFrame = true;
+ }
+ } while (true);
+#else
// LibAV provides no API to retrieve the decoded sample's duration.
// (FFmpeg >= 1.0 provides av_frame_get_pkt_duration)
// As such we instead use a map using the dts as key that we will retrieve
@@ -276,8 +321,21 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample,
// against the map becoming extremely big.
mDurationMap.Clear();
}
+
+ MediaResult rv = CreateImage(aSample->mOffset, pts, duration);
+ if (NS_SUCCEEDED(rv) && aGotFrame) {
+ *aGotFrame = true;
+ }
+ return rv;
+#endif
+}
+
+MediaResult
+FFmpegVideoDecoder<LIBAV_VER>::CreateImage(int64_t aOffset, int64_t aPts,
+ int64_t aDuration)
+{
FFMPEG_LOG("Got one frame output with pts=%lld dts=%lld duration=%lld opaque=%lld",
- pts, mFrame->pkt_dts, duration, mCodecContext->reordered_opaque);
+ aPts, mFrame->pkt_dts, aDuration, mCodecContext->reordered_opaque);
VideoData::YCbCrBuffer b;
b.mPlanes[0].mData = mFrame->data[0];
@@ -317,9 +375,9 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample,
RefPtr<VideoData> v =
VideoData::CreateAndCopyData(mInfo,
mImageContainer,
- aSample->mOffset,
- pts,
- duration,
+ aOffset,
+ aPts,
+ aDuration,
b,
!!mFrame->key_frame,
-1,
@@ -331,9 +389,6 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample,
RESULT_DETAIL("image allocation error"));
}
mCallback->Output(v);
- if (aGotFrame) {
- *aGotFrame = true;
- }
return NS_OK;
}
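The new LIBAVCODEC_VERSION_MAJOR >= 58 branch above follows FFmpeg's send/receive decode model: push one packet, then drain every frame the decoder can produce until it reports EAGAIN or EOF. A condensed sketch of that loop against plain libavcodec, where handleFrame() is a hypothetical callback standing in for the CreateImage()/Output() calls in the patch:

extern "C" {
#include <libavcodec/avcodec.h>
}

// Send one packet, then drain every frame the decoder can produce.
static int
DecodeOnePacket(AVCodecContext* aCtx, const AVPacket* aPacket, AVFrame* aFrame,
                void (*handleFrame)(const AVFrame*))
{
  int res = avcodec_send_packet(aCtx, aPacket);
  if (res < 0) {
    // AVERROR(EAGAIN) is not expected here because every pending frame is
    // drained below before the next packet is sent; treat failures as fatal.
    return res;
  }
  // One packet can yield zero, one, or several frames.
  while ((res = avcodec_receive_frame(aCtx, aFrame)) == 0) {
    handleFrame(aFrame);
  }
  return (res == AVERROR(EAGAIN) || res == AVERROR_EOF) ? 0 : res;
}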
diff --git a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h
index 786df0da1..49a55e8a6 100644
--- a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h
+++ b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h
@@ -49,6 +49,7 @@ private:
MediaResult DoDecode(MediaRawData* aSample) override;
MediaResult DoDecode(MediaRawData* aSample, bool* aGotFrame);
MediaResult DoDecode(MediaRawData* aSample, uint8_t* aData, int aSize, bool* aGotFrame);
+ MediaResult CreateImage(int64_t aOffset, int64_t aPts, int64_t aDuration);
void ProcessDrain() override;
void ProcessFlush() override;
void OutputDelayedFrames();
diff --git a/dom/media/platforms/ffmpeg/ffvpx/moz.build b/dom/media/platforms/ffmpeg/ffvpx/moz.build
index aee58b5b0..c0041a4d4 100644
--- a/dom/media/platforms/ffmpeg/ffvpx/moz.build
+++ b/dom/media/platforms/ffmpeg/ffvpx/moz.build
@@ -20,7 +20,7 @@ SOURCES += [
]
LOCAL_INCLUDES += [
'..',
- '../ffmpeg57/include',
+ '../ffmpeg58/include',
]
if CONFIG['OS_ARCH'] == 'WINNT':
diff --git a/dom/media/platforms/moz.build b/dom/media/platforms/moz.build
index 3fb0cc842..f5fb72c5d 100644
--- a/dom/media/platforms/moz.build
+++ b/dom/media/platforms/moz.build
@@ -10,7 +10,6 @@ EXPORTS += [
'agnostic/TheoraDecoder.h',
'agnostic/VorbisDecoder.h',
'agnostic/VPXDecoder.h',
- 'MediaTelemetryConstants.h',
'PDMFactory.h',
'PlatformDecoderModule.h',
'wrappers/FuzzingWrapper.h',
@@ -55,6 +54,14 @@ if CONFIG['MOZ_FFMPEG']:
'ffmpeg',
]
+if CONFIG['MOZ_AV1']:
+ EXPORTS += [
+ 'agnostic/AOMDecoder.h',
+ ]
+ UNIFIED_SOURCES += [
+ 'agnostic/AOMDecoder.cpp',
+ ]
+
if CONFIG['MOZ_APPLEMEDIA']:
EXPORTS += [
'apple/AppleDecoderModule.h',
diff --git a/dom/media/platforms/omx/OmxPlatformLayer.cpp b/dom/media/platforms/omx/OmxPlatformLayer.cpp
index 039b4a22f..15b3062a4 100644
--- a/dom/media/platforms/omx/OmxPlatformLayer.cpp
+++ b/dom/media/platforms/omx/OmxPlatformLayer.cpp
@@ -282,26 +282,7 @@ OmxPlatformLayer::CompressionFormat()
}
}
-// Implementations for different platforms will be defined in their own files.
-#ifdef OMX_PLATFORM_GONK
-
-bool
-OmxPlatformLayer::SupportsMimeType(const nsACString& aMimeType)
-{
- return GonkOmxPlatformLayer::FindComponents(aMimeType);
-}
-
-OmxPlatformLayer*
-OmxPlatformLayer::Create(OmxDataDecoder* aDataDecoder,
- OmxPromiseLayer* aPromiseLayer,
- TaskQueue* aTaskQueue,
- layers::ImageContainer* aImageContainer)
-{
- return new GonkOmxPlatformLayer(aDataDecoder, aPromiseLayer, aTaskQueue, aImageContainer);
-}
-
-#else // For platforms without OMX IL support.
-
+// For platforms without OMX IL support.
bool
OmxPlatformLayer::SupportsMimeType(const nsACString& aMimeType)
{
@@ -317,6 +298,4 @@ OmxPlatformLayer::Create(OmxDataDecoder* aDataDecoder,
return nullptr;
}
-#endif
-
}
diff --git a/dom/media/platforms/wmf/DXVA2Manager.cpp b/dom/media/platforms/wmf/DXVA2Manager.cpp
index 0c1734c54..69e002f7f 100644
--- a/dom/media/platforms/wmf/DXVA2Manager.cpp
+++ b/dom/media/platforms/wmf/DXVA2Manager.cpp
@@ -14,8 +14,6 @@
#include "mozilla/layers/D3D11ShareHandleImage.h"
#include "mozilla/layers/ImageBridgeChild.h"
#include "mozilla/layers/TextureForwarder.h"
-#include "mozilla/Telemetry.h"
-#include "MediaTelemetryConstants.h"
#include "mfapi.h"
#include "gfxPrefs.h"
#include "MFTDecoder.h"
@@ -442,9 +440,6 @@ D3D9DXVA2Manager::Init(layers::KnowsCompositor* aKnowsCompositor,
}
mTextureClientAllocator->SetMaxPoolSize(5);
- Telemetry::Accumulate(Telemetry::MEDIA_DECODER_BACKEND_USED,
- uint32_t(media::MediaDecoderBackend::WMFDXVA2D3D9));
-
return S_OK;
}
@@ -775,9 +770,6 @@ D3D11DXVA2Manager::Init(layers::KnowsCompositor* aKnowsCompositor,
}
mTextureClientAllocator->SetMaxPoolSize(5);
- Telemetry::Accumulate(Telemetry::MEDIA_DECODER_BACKEND_USED,
- uint32_t(media::MediaDecoderBackend::WMFDXVA2D3D11));
-
return S_OK;
}
diff --git a/dom/media/platforms/wmf/WMFAudioMFTManager.cpp b/dom/media/platforms/wmf/WMFAudioMFTManager.cpp
index 69b62da51..3dacdf0aa 100644
--- a/dom/media/platforms/wmf/WMFAudioMFTManager.cpp
+++ b/dom/media/platforms/wmf/WMFAudioMFTManager.cpp
@@ -252,7 +252,6 @@ WMFAudioMFTManager::Output(int64_t aStreamOffset,
LOG("Audio MFTDecoder returned success but null output.");
nsCOMPtr<nsIRunnable> task = NS_NewRunnableFunction([]() -> void {
LOG("Reporting telemetry AUDIO_MFT_OUTPUT_NULL_SAMPLES");
- Telemetry::Accumulate(Telemetry::ID::AUDIO_MFT_OUTPUT_NULL_SAMPLES, 1);
});
AbstractThread::MainThread()->Dispatch(task.forget());
return E_FAIL;
diff --git a/dom/media/platforms/wmf/WMFDecoderModule.h b/dom/media/platforms/wmf/WMFDecoderModule.h
index cd7b8c660..6582f8056 100644
--- a/dom/media/platforms/wmf/WMFDecoderModule.h
+++ b/dom/media/platforms/wmf/WMFDecoderModule.h
@@ -40,10 +40,8 @@ public:
static int GetNumDecoderThreads();
// Accessors that report whether we have the required MFTs available
- // on the system to play various codecs. Windows Vista doesn't have the
- // H.264/AAC decoders if the "Platform Update Supplement for Windows Vista"
- // is not installed, and Window N and KN variants also require a "Media
- // Feature Pack" to be installed. Windows XP doesn't have WMF.
+ // on the system to play various codecs. Windows N and KN variants
+ // require a "Media Feature Pack" to be installed.
static bool HasAAC();
static bool HasH264();
diff --git a/dom/media/platforms/wmf/WMFMediaDataDecoder.cpp b/dom/media/platforms/wmf/WMFMediaDataDecoder.cpp
index d2c13eac7..e6dd29c6d 100644
--- a/dom/media/platforms/wmf/WMFMediaDataDecoder.cpp
+++ b/dom/media/platforms/wmf/WMFMediaDataDecoder.cpp
@@ -8,7 +8,6 @@
#include "VideoUtils.h"
#include "WMFUtils.h"
#include "nsTArray.h"
-#include "mozilla/Telemetry.h"
#include "mozilla/Logging.h"
#include "mozilla/SyncRunnable.h"
@@ -39,39 +38,6 @@ WMFMediaDataDecoder::Init()
return InitPromise::CreateAndResolve(mMFTManager->GetType(), __func__);
}
-// A single telemetry sample is reported for each MediaDataDecoder object
-// that has detected error or produced output successfully.
-static void
-SendTelemetry(unsigned long hr)
-{
- // Collapse the error codes into a range of 0-0xff that can be viewed in
- // telemetry histograms. For most MF_E_* errors, unique samples are used,
- // retaining the least significant 7 or 8 bits. Other error codes are
- // bucketed.
- uint32_t sample;
- if (SUCCEEDED(hr)) {
- sample = 0;
- } else if (hr < 0xc00d36b0) {
- sample = 1; // low bucket
- } else if (hr < 0xc00d3700) {
- sample = hr & 0xffU; // MF_E_*
- } else if (hr <= 0xc00d3705) {
- sample = 0x80 + (hr & 0xfU); // more MF_E_*
- } else if (hr < 0xc00d6d60) {
- sample = 2; // mid bucket
- } else if (hr <= 0xc00d6d78) {
- sample = hr & 0xffU; // MF_E_TRANSFORM_*
- } else {
- sample = 3; // high bucket
- }
-
- nsCOMPtr<nsIRunnable> runnable = NS_NewRunnableFunction(
- [sample] {
- Telemetry::Accumulate(Telemetry::MEDIA_WMF_DECODE_ERROR, sample);
- });
- NS_DispatchToMainThread(runnable);
-}
-
void
WMFMediaDataDecoder::Shutdown()
{
@@ -91,9 +57,6 @@ WMFMediaDataDecoder::ProcessShutdown()
if (mMFTManager) {
mMFTManager->Shutdown();
mMFTManager = nullptr;
- if (!mRecordedError && mHasSuccessfulOutput) {
- SendTelemetry(S_OK);
- }
}
}
@@ -125,10 +88,6 @@ WMFMediaDataDecoder::ProcessDecode(MediaRawData* aSample)
NS_WARNING("MFTManager rejected sample");
mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
RESULT_DETAIL("MFTManager::Input:%x", hr)));
- if (!mRecordedError) {
- SendTelemetry(hr);
- mRecordedError = true;
- }
return;
}
@@ -144,7 +103,6 @@ WMFMediaDataDecoder::ProcessOutput()
HRESULT hr = S_OK;
while (SUCCEEDED(hr = mMFTManager->Output(mLastStreamOffset, output)) &&
output) {
- mHasSuccessfulOutput = true;
mCallback->Output(output);
}
if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
@@ -153,10 +111,6 @@ WMFMediaDataDecoder::ProcessOutput()
NS_WARNING("WMFMediaDataDecoder failed to output data");
mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
RESULT_DETAIL("MFTManager::Output:%x", hr)));
- if (!mRecordedError) {
- SendTelemetry(hr);
- mRecordedError = true;
- }
}
}
diff --git a/dom/media/platforms/wmf/WMFMediaDataDecoder.h b/dom/media/platforms/wmf/WMFMediaDataDecoder.h
index a4dd49f56..f869012e7 100644
--- a/dom/media/platforms/wmf/WMFMediaDataDecoder.h
+++ b/dom/media/platforms/wmf/WMFMediaDataDecoder.h
@@ -33,7 +33,7 @@ public:
// Returns S_OK on success, or MF_E_TRANSFORM_NEED_MORE_INPUT if there's not
// enough data to produce more output. If this returns a failure code other
// than MF_E_TRANSFORM_NEED_MORE_INPUT, an error will be reported to the
- // MP4Reader.
+ // MP4Demuxer.
virtual HRESULT Output(int64_t aStreamOffset,
RefPtr<MediaData>& aOutput) = 0;
@@ -136,10 +136,6 @@ private:
Atomic<bool> mIsFlushing;
bool mIsShutDown;
-
- // For telemetry
- bool mHasSuccessfulOutput = false;
- bool mRecordedError = false;
};
} // namespace mozilla
diff --git a/dom/media/platforms/wmf/WMFVideoMFTManager.cpp b/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
index 291bc5b74..a7633a7de 100644
--- a/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
+++ b/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
@@ -27,7 +27,6 @@
#include "mozilla/WindowsVersion.h"
#include "mozilla/Telemetry.h"
#include "nsPrintfCString.h"
-#include "MediaTelemetryConstants.h"
#include "GMPUtils.h" // For SplitAt. TODO: Move SplitAt to a central place.
#include "MP4Decoder.h"
#include "VPXDecoder.h"
@@ -128,7 +127,6 @@ WMFVideoMFTManager::~WMFVideoMFTManager()
nsCOMPtr<nsIRunnable> task = NS_NewRunnableFunction([=]() -> void {
LOG(nsPrintfCString("Reporting telemetry VIDEO_MFT_OUTPUT_NULL_SAMPLES=%d", telemetry).get());
- Telemetry::Accumulate(Telemetry::ID::VIDEO_MFT_OUTPUT_NULL_SAMPLES, telemetry);
});
AbstractThread::MainThread()->Dispatch(task.forget());
}
@@ -511,8 +509,6 @@ WMFVideoMFTManager::InitInternal(bool aForceD3D9)
if (mStreamType == VP9 || mStreamType == VP8) {
return false;
}
- Telemetry::Accumulate(Telemetry::MEDIA_DECODER_BACKEND_USED,
- uint32_t(media::MediaDecoderBackend::WMFSoftware));
}
mDecoder = decoder;
diff --git a/dom/media/systemservices/LoadManager.cpp b/dom/media/systemservices/LoadManager.cpp
index f0f4f83a7..34b8fc7e0 100644
--- a/dom/media/systemservices/LoadManager.cpp
+++ b/dom/media/systemservices/LoadManager.cpp
@@ -15,7 +15,6 @@
#include "nsThreadUtils.h"
#include "nsReadableUtils.h"
#include "nsIObserverService.h"
-#include "mozilla/Telemetry.h"
#include "mozilla/ArrayUtils.h"
// MOZ_LOG=LoadManager:5
@@ -192,23 +191,6 @@ LoadManagerSingleton::RemoveObserver(webrtc::CPULoadStateObserver * aObserver)
for (size_t i = 0; i < MOZ_ARRAY_LENGTH(mTimeInState); i++) {
total += mTimeInState[i];
}
- // Don't include short calls; we don't have reasonable load data, and
- // such short calls rarely reach a stable state. Keep relatively
- // short calls separate from longer ones
- bool log = total > 5*PR_MSEC_PER_SEC;
- bool small = log && total < 30*PR_MSEC_PER_SEC;
- if (log) {
- // Note: We don't care about rounding here; thus total may be < 100
- Telemetry::Accumulate(small ? Telemetry::WEBRTC_LOAD_STATE_RELAXED_SHORT :
- Telemetry::WEBRTC_LOAD_STATE_RELAXED,
- (uint32_t) (mTimeInState[webrtc::CPULoadState::kLoadRelaxed]/total * 100));
- Telemetry::Accumulate(small ? Telemetry::WEBRTC_LOAD_STATE_NORMAL_SHORT :
- Telemetry::WEBRTC_LOAD_STATE_NORMAL,
- (uint32_t) (mTimeInState[webrtc::CPULoadState::kLoadNormal]/total * 100));
- Telemetry::Accumulate(small ? Telemetry::WEBRTC_LOAD_STATE_STRESSED_SHORT :
- Telemetry::WEBRTC_LOAD_STATE_STRESSED,
- (uint32_t) (mTimeInState[webrtc::CPULoadState::kLoadStressed]/total * 100));
- }
for (auto &in_state : mTimeInState) {
in_state = 0;
}
diff --git a/dom/media/test/bug580982.webm b/dom/media/test/bug1377278.webm
index 802019f39..802019f39 100644
--- a/dom/media/test/bug580982.webm
+++ b/dom/media/test/bug1377278.webm
Binary files differ
diff --git a/dom/media/test/bug580982.webm^headers^ b/dom/media/test/bug1377278.webm^headers^
index 4030ea1d3..4030ea1d3 100644
--- a/dom/media/test/bug580982.webm^headers^
+++ b/dom/media/test/bug1377278.webm^headers^
diff --git a/dom/media/test/crashtests/1228484.html b/dom/media/test/crashtests/1228484.html
deleted file mode 100644
index 2b2e9b0f9..000000000
--- a/dom/media/test/crashtests/1228484.html
+++ /dev/null
@@ -1,13 +0,0 @@
-<!DOCTYPE html>
-<html>
-<head>
-<script>
-
-var htmlAudio = new Audio(URL.createObjectURL(new window.MediaSource()));
-
-(new window.AudioContext("ringer")).createMediaElementSource(htmlAudio);
-(new window.AudioContext("alarm")).createMediaElementSource(htmlAudio);
-
-</script>
-</head>
-</html>
diff --git a/dom/media/test/crashtests/crashtests.list b/dom/media/test/crashtests/crashtests.list
index 496fe5ee5..e4f25ca8d 100644
--- a/dom/media/test/crashtests/crashtests.list
+++ b/dom/media/test/crashtests/crashtests.list
@@ -81,8 +81,6 @@ load 1157994.html
load 1158427.html
load 1185176.html
load 1185192.html
-load 1223670.html
-load 1228484.html
load 1304948.html
load 1319486.html
load 1291702.html
diff --git a/dom/media/test/manifest.js b/dom/media/test/manifest.js
index c6d533c1b..52e53a271 100644
--- a/dom/media/test/manifest.js
+++ b/dom/media/test/manifest.js
@@ -224,6 +224,9 @@ var gPlayTests = [
// Test playback of a webm file
{ name:"seek-short.webm", type:"video/webm", duration:0.23 },
+ // Test playback of a webm file with 'matroska' doctype
+ { name:"bug1377278.webm", type:"video/webm", duration:4.0 },
+
// Test playback of a WebM file with non-zero start time.
{ name:"split.webm", type:"video/webm", duration:1.967 },
@@ -263,10 +266,10 @@ var gPlayTests = [
{ name:"small-shot.mp3", type:"audio/mpeg", duration:0.27 },
{ name:"owl.mp3", type:"audio/mpeg", duration:3.343 },
// owl.mp3 as above, but with something funny going on in the ID3v2 tag
- // that causes DirectShow to fail.
+ // that caused DirectShow to fail.
{ name:"owl-funny-id3.mp3", type:"audio/mpeg", duration:3.343 },
// owl.mp3 as above, but with something even funnier going on in the ID3v2 tag
- // that causes DirectShow to fail.
+ // that caused DirectShow to fail.
{ name:"owl-funnier-id3.mp3", type:"audio/mpeg", duration:3.343 },
// One second of silence with ~140KB of ID3 tags. Usually when the first MP3
// frame is at such a high offset into the file, MP3FrameParser will give up
@@ -532,7 +535,6 @@ var gErrorTests = [
{ name:"448636.ogv", type:"video/ogg" },
{ name:"bug504843.ogv", type:"video/ogg" },
{ name:"bug501279.ogg", type:"audio/ogg" },
- { name:"bug580982.webm", type:"video/webm" },
{ name:"bug603918.webm", type:"video/webm" },
{ name:"bug604067.webm", type:"video/webm" },
{ name:"bogus.duh", type:"bogus/duh" }
diff --git a/dom/media/test/mochitest.ini b/dom/media/test/mochitest.ini
index ddabf78b6..742ac1b1c 100644
--- a/dom/media/test/mochitest.ini
+++ b/dom/media/test/mochitest.ini
@@ -382,8 +382,6 @@ support-files =
bug556821.ogv^headers^
bug557094.ogv
bug557094.ogv^headers^
- bug580982.webm
- bug580982.webm^headers^
bug603918.webm
bug603918.webm^headers^
bug604067.webm
@@ -395,6 +393,8 @@ support-files =
bug1301226.wav^headers^
bug1301226-odd.wav
bug1301226-odd.wav^headers^
+ bug1377278.webm
+ bug1377278.webm^headers^
can_play_type_dash.js
can_play_type_ogg.js
can_play_type_wave.js
diff --git a/dom/media/test/test_can_play_type_mpeg.html b/dom/media/test/test_can_play_type_mpeg.html
index 89e5fabef..514b5cc2f 100644
--- a/dom/media/test/test_can_play_type_mpeg.html
+++ b/dom/media/test/test_can_play_type_mpeg.html
@@ -151,8 +151,7 @@ var haveMp4 = (getPref("media.wmf.enabled") && IsWindowsVistaOrLater()) ||
check_mp4(document.getElementById('v'), haveMp4);
-var haveMp3 = getPref("media.directshow.enabled") ||
- (getPref("media.wmf.enabled") && IsWindowsVistaOrLater()) ||
+var haveMp3 = getPref("media.wmf.enabled") ||
(IsLinux() && getPref("media.ffmpeg.enabled")) ||
(IsSupportedAndroid() &&
((IsJellyBeanOrLater() && getPref("media.android-media-codec.enabled")) ||
diff --git a/dom/media/webaudio/AudioBuffer.cpp b/dom/media/webaudio/AudioBuffer.cpp
index cb834f6a5..e7eba2d48 100644
--- a/dom/media/webaudio/AudioBuffer.cpp
+++ b/dom/media/webaudio/AudioBuffer.cpp
@@ -27,7 +27,6 @@ NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(AudioBuffer)
NS_IMPL_CYCLE_COLLECTION_UNLINK_END
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN(AudioBuffer)
- NS_IMPL_CYCLE_COLLECTION_TRAVERSE_SCRIPT_OBJECTS
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END
NS_IMPL_CYCLE_COLLECTION_TRACE_BEGIN(AudioBuffer)
diff --git a/dom/media/webaudio/AudioContext.cpp b/dom/media/webaudio/AudioContext.cpp
index 85842c811..d58441309 100755
--- a/dom/media/webaudio/AudioContext.cpp
+++ b/dom/media/webaudio/AudioContext.cpp
@@ -179,23 +179,13 @@ AudioContext::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto)
AudioContext::Constructor(const GlobalObject& aGlobal,
ErrorResult& aRv)
{
- return AudioContext::Constructor(aGlobal,
- AudioChannelService::GetDefaultAudioChannel(),
- aRv);
-}
-
-/* static */ already_AddRefed<AudioContext>
-AudioContext::Constructor(const GlobalObject& aGlobal,
- AudioChannel aChannel,
- ErrorResult& aRv)
-{
nsCOMPtr<nsPIDOMWindowInner> window = do_QueryInterface(aGlobal.GetAsSupports());
if (!window) {
aRv.Throw(NS_ERROR_FAILURE);
return nullptr;
}
- RefPtr<AudioContext> object = new AudioContext(window, false, aChannel);
+ RefPtr<AudioContext> object = new AudioContext(window, false, AudioChannelService::GetDefaultAudioChannel());
aRv = object->Init();
if (NS_WARN_IF(aRv.Failed())) {
return nullptr;
diff --git a/dom/media/webaudio/AudioContext.h b/dom/media/webaudio/AudioContext.h
index 069efa986..599debef8 100644
--- a/dom/media/webaudio/AudioContext.h
+++ b/dom/media/webaudio/AudioContext.h
@@ -151,12 +151,6 @@ public:
static already_AddRefed<AudioContext>
Constructor(const GlobalObject& aGlobal, ErrorResult& aRv);
- // Constructor for regular AudioContext. A default audio channel is needed.
- static already_AddRefed<AudioContext>
- Constructor(const GlobalObject& aGlobal,
- AudioChannel aChannel,
- ErrorResult& aRv);
-
// Constructor for offline AudioContext
static already_AddRefed<AudioContext>
Constructor(const GlobalObject& aGlobal,
diff --git a/dom/media/webaudio/AudioParam.cpp b/dom/media/webaudio/AudioParam.cpp
index 6f5574993..c1a874264 100644
--- a/dom/media/webaudio/AudioParam.cpp
+++ b/dom/media/webaudio/AudioParam.cpp
@@ -22,7 +22,6 @@ NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(AudioParam)
NS_IMPL_CYCLE_COLLECTION_UNLINK_END
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN(AudioParam)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mNode)
- NS_IMPL_CYCLE_COLLECTION_TRAVERSE_SCRIPT_OBJECTS
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END
NS_IMPL_CYCLE_COLLECTION_TRACE_WRAPPERCACHE(AudioParam)
diff --git a/dom/media/webaudio/MediaBufferDecoder.cpp b/dom/media/webaudio/MediaBufferDecoder.cpp
index e9f1d5a47..f3b75ca1a 100644
--- a/dom/media/webaudio/MediaBufferDecoder.cpp
+++ b/dom/media/webaudio/MediaBufferDecoder.cpp
@@ -23,7 +23,6 @@
#include "VideoUtils.h"
#include "WebAudioUtils.h"
#include "mozilla/dom/Promise.h"
-#include "mozilla/Telemetry.h"
#include "nsPrintfCString.h"
#include "GMPService.h"
@@ -45,7 +44,6 @@ NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN(WebAudioDecodeJob)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mOutput)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mSuccessCallback)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mFailureCallback)
- NS_IMPL_CYCLE_COLLECTION_TRAVERSE_SCRIPT_OBJECTS
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END
NS_IMPL_CYCLE_COLLECTION_TRACE_BEGIN(WebAudioDecodeJob)
@@ -302,7 +300,6 @@ MediaDecodeTask::OnMetadataRead(MetadataHolder* aMetadata)
MOZ_LOG(gMediaDecoderLog,
LogLevel::Debug,
("Telemetry (WebAudio) MEDIA_CODEC_USED= '%s'", codec.get()));
- Telemetry::Accumulate(Telemetry::ID::MEDIA_CODEC_USED, codec);
});
AbstractThread::MainThread()->Dispatch(task.forget());
diff --git a/dom/media/webaudio/WaveShaperNode.cpp b/dom/media/webaudio/WaveShaperNode.cpp
index d5c617dcd..4c50f2f1d 100644
--- a/dom/media/webaudio/WaveShaperNode.cpp
+++ b/dom/media/webaudio/WaveShaperNode.cpp
@@ -23,7 +23,6 @@ NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN_INHERITED(WaveShaperNode, AudioNode)
NS_IMPL_CYCLE_COLLECTION_UNLINK_END
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(WaveShaperNode, AudioNode)
- NS_IMPL_CYCLE_COLLECTION_TRAVERSE_SCRIPT_OBJECTS
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END
NS_IMPL_CYCLE_COLLECTION_TRACE_BEGIN(WaveShaperNode)
diff --git a/dom/media/webm/WebMDecoder.cpp b/dom/media/webm/WebMDecoder.cpp
index b41de6d40..cbe9ffdb7 100644
--- a/dom/media/webm/WebMDecoder.cpp
+++ b/dom/media/webm/WebMDecoder.cpp
@@ -5,6 +5,10 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "mozilla/Preferences.h"
+#ifdef MOZ_AV1
+#include "AOMDecoder.h"
+#endif
+#include "MediaPrefs.h"
#include "MediaDecoderStateMachine.h"
#include "WebMDemuxer.h"
#include "WebMDecoder.h"
@@ -38,7 +42,10 @@ WebMDecoder::CanHandleMediaType(const nsACString& aMIMETypeExcludingCodecs,
const bool isWebMAudio = aMIMETypeExcludingCodecs.EqualsASCII("audio/webm");
const bool isWebMVideo = aMIMETypeExcludingCodecs.EqualsASCII("video/webm");
- if (!isWebMAudio && !isWebMVideo) {
+ const bool isMatroskaAudio = aMIMETypeExcludingCodecs.EqualsASCII("audio/x-matroska");
+ const bool isMatroskaVideo = aMIMETypeExcludingCodecs.EqualsASCII("video/x-matroska");
+
+ if (!isWebMAudio && !isWebMVideo && !isMatroskaAudio && !isMatroskaVideo) {
return false;
}
@@ -59,12 +66,26 @@ WebMDecoder::CanHandleMediaType(const nsACString& aMIMETypeExcludingCodecs,
}
// Note: Only accept VP8/VP9 in a video content type, not in an audio
// content type.
- if (isWebMVideo &&
+ if ((isWebMVideo || isMatroskaVideo) &&
(codec.EqualsLiteral("vp8") || codec.EqualsLiteral("vp8.0") ||
codec.EqualsLiteral("vp9") || codec.EqualsLiteral("vp9.0"))) {
continue;
}
+#ifdef MOZ_AV1
+ if (MediaPrefs::AV1Enabled() && IsAV1CodecString(codec)) {
+ continue;
+ }
+#endif
+
+ if (IsH264CodecString(codec)) {
+ continue;
+ }
+
+ if (IsAACCodecString(codec)) {
+ continue;
+ }
+
// Some unsupported codec.
return false;
}
diff --git a/dom/media/webm/WebMDemuxer.cpp b/dom/media/webm/WebMDemuxer.cpp
index 20ed71581..84b4b506e 100644
--- a/dom/media/webm/WebMDemuxer.cpp
+++ b/dom/media/webm/WebMDemuxer.cpp
@@ -8,7 +8,11 @@
#include "MediaDecoderStateMachine.h"
#include "AbstractMediaDecoder.h"
#include "MediaResource.h"
+#ifdef MOZ_AV1
+#include "AOMDecoder.h"
+#endif
#include "OpusDecoder.h"
+#include "VPXDecoder.h"
#include "WebMDemuxer.h"
#include "WebMBufferedParser.h"
#include "gfx2DGlue.h"
@@ -24,12 +28,9 @@
#include "mozilla/Sprintf.h"
#include <algorithm>
+#include <numeric>
#include <stdint.h>
-#define VPX_DONT_DEFINE_STDINT_TYPES
-#include "vpx/vp8dx.h"
-#include "vpx/vpx_decoder.h"
-
#define WEBM_DEBUG(arg, ...) MOZ_LOG(gMediaDemuxerLog, mozilla::LogLevel::Debug, ("WebMDemuxer(%p)::%s: " arg, this, __func__, ##__VA_ARGS__))
extern mozilla::LazyLogModule gMediaDemuxerLog;
@@ -322,6 +323,23 @@ WebMDemuxer::ReadMetadata()
case NESTEGG_CODEC_VP9:
mInfo.mVideo.mMimeType = "video/webm; codecs=vp9";
break;
+ case NESTEGG_CODEC_AV1:
+ mInfo.mVideo.mMimeType = "video/webm; codecs=av1";
+ break;
+ case NESTEGG_CODEC_AVC1: {
+ mInfo.mVideo.mMimeType = "video/webm; codecs=avc1";
+
+ unsigned char* data = 0;
+ size_t length = 0;
+ r = nestegg_track_codec_data(context, track, 0, &data, &length);
+ if (r == -1) {
+ return NS_ERROR_FAILURE;
+ }
+
+ mInfo.mVideo.mExtraData = new MediaByteBuffer(length);
+ mInfo.mVideo.mExtraData->AppendElements(data, length);
+ break;
+ }
default:
NS_WARNING("Unknown WebM video codec");
return NS_ERROR_FAILURE;
@@ -404,6 +422,8 @@ WebMDemuxer::ReadMetadata()
mInfo.mAudio.mMimeType = "audio/opus";
OpusDataDecoder::AppendCodecDelay(mInfo.mAudio.mCodecSpecificConfig,
media::TimeUnit::FromNanoseconds(params.codec_delay).ToMicroseconds());
+ } else if (mAudioCodec == NESTEGG_CODEC_AAC) {
+ mInfo.mAudio.mMimeType = "audio/mp4a-latm";
}
mSeekPreroll = params.seek_preroll;
mInfo.mAudio.mRate = params.rate;
@@ -549,7 +569,7 @@ WebMDemuxer::GetTrackCrypto(TrackInfo::TrackType aType, size_t aTrackNumber) {
return crypto;
}
-bool
+nsresult
WebMDemuxer::GetNextPacket(TrackInfo::TrackType aType, MediaRawDataQueue *aSamples)
{
if (mIsMediaSource) {
@@ -557,17 +577,18 @@ WebMDemuxer::GetNextPacket(TrackInfo::TrackType aType, MediaRawDataQueue *aSampl
EnsureUpToDateIndex();
}
- RefPtr<NesteggPacketHolder> holder(NextPacket(aType));
+ RefPtr<NesteggPacketHolder> holder;
+ nsresult rv = NextPacket(aType, holder);
- if (!holder) {
- return false;
+ if (NS_FAILED(rv)) {
+ return rv;
}
int r = 0;
unsigned int count = 0;
r = nestegg_packet_count(holder->Packet(), &count);
if (r == -1) {
- return false;
+ return NS_ERROR_DOM_MEDIA_DEMUXER_ERR;
}
int64_t tstamp = holder->Timestamp();
int64_t duration = holder->Duration();
@@ -578,7 +599,11 @@ WebMDemuxer::GetNextPacket(TrackInfo::TrackType aType, MediaRawDataQueue *aSampl
// video frame.
int64_t next_tstamp = INT64_MIN;
if (aType == TrackInfo::kAudioTrack) {
- RefPtr<NesteggPacketHolder> next_holder(NextPacket(aType));
+ RefPtr<NesteggPacketHolder> next_holder;
+ rv = NextPacket(aType, next_holder);
+ if (NS_FAILED(rv) && rv != NS_ERROR_DOM_MEDIA_END_OF_STREAM) {
+ return rv;
+ }
if (next_holder) {
next_tstamp = next_holder->Timestamp();
PushAudioPacket(next_holder);
@@ -593,7 +618,11 @@ WebMDemuxer::GetNextPacket(TrackInfo::TrackType aType, MediaRawDataQueue *aSampl
}
mLastAudioFrameTime = Some(tstamp);
} else if (aType == TrackInfo::kVideoTrack) {
- RefPtr<NesteggPacketHolder> next_holder(NextPacket(aType));
+ RefPtr<NesteggPacketHolder> next_holder;
+ rv = NextPacket(aType, next_holder);
+ if (NS_FAILED(rv) && rv != NS_ERROR_DOM_MEDIA_END_OF_STREAM) {
+ return rv;
+ }
if (next_holder) {
next_tstamp = next_holder->Timestamp();
PushVideoPacket(next_holder);
@@ -610,7 +639,7 @@ WebMDemuxer::GetNextPacket(TrackInfo::TrackType aType, MediaRawDataQueue *aSampl
}
if (mIsMediaSource && next_tstamp == INT64_MIN) {
- return false;
+ return NS_ERROR_DOM_MEDIA_END_OF_STREAM;
}
int64_t discardPadding = 0;
@@ -626,7 +655,7 @@ WebMDemuxer::GetNextPacket(TrackInfo::TrackType aType, MediaRawDataQueue *aSampl
r = nestegg_packet_data(holder->Packet(), i, &data, &length);
if (r == -1) {
WEBM_DEBUG("nestegg_packet_data failed r=%d", r);
- return false;
+ return NS_ERROR_DOM_MEDIA_DEMUXER_ERR;
}
bool isKeyframe = false;
if (aType == TrackInfo::kAudioTrack) {
@@ -636,29 +665,49 @@ WebMDemuxer::GetNextPacket(TrackInfo::TrackType aType, MediaRawDataQueue *aSampl
// Packet is encrypted, can't peek, use packet info
isKeyframe = nestegg_packet_has_keyframe(holder->Packet()) == NESTEGG_PACKET_HAS_KEYFRAME_TRUE;
} else {
- vpx_codec_stream_info_t si;
- PodZero(&si);
- si.sz = sizeof(si);
+ auto sample = MakeSpan(data, length);
switch (mVideoCodec) {
case NESTEGG_CODEC_VP8:
- vpx_codec_peek_stream_info(vpx_codec_vp8_dx(), data, length, &si);
+ isKeyframe = VPXDecoder::IsKeyframe(sample, VPXDecoder::Codec::VP8);
break;
case NESTEGG_CODEC_VP9:
- vpx_codec_peek_stream_info(vpx_codec_vp9_dx(), data, length, &si);
+ isKeyframe = VPXDecoder::IsKeyframe(sample, VPXDecoder::Codec::VP9);
+ break;
+#ifdef MOZ_AV1
+ case NESTEGG_CODEC_AV1:
+ isKeyframe = AOMDecoder::IsKeyframe(sample);
break;
+#endif
+ case NESTEGG_CODEC_AVC1:
+ isKeyframe = nestegg_packet_has_keyframe(holder->Packet());
+ break;
+ default:
+ NS_WARNING("Cannot detect keyframes in unknown WebM video codec");
+ return NS_ERROR_FAILURE;
}
- isKeyframe = si.is_kf;
if (isKeyframe) {
- // We only look for resolution changes on keyframes for both VP8 and
- // VP9. Other resolution changes are invalid.
- if (mLastSeenFrameWidth.isSome() && mLastSeenFrameHeight.isSome() &&
- (si.w != mLastSeenFrameWidth.value() ||
- si.h != mLastSeenFrameHeight.value())) {
- mInfo.mVideo.mDisplay = nsIntSize(si.w, si.h);
+ // For both VP8 and VP9, we only look for resolution changes
+ // on keyframes. Other resolution changes are invalid.
+ auto dimensions = nsIntSize(0, 0);
+ switch (mVideoCodec) {
+ case NESTEGG_CODEC_VP8:
+ dimensions = VPXDecoder::GetFrameSize(sample, VPXDecoder::Codec::VP8);
+ break;
+ case NESTEGG_CODEC_VP9:
+ dimensions = VPXDecoder::GetFrameSize(sample, VPXDecoder::Codec::VP9);
+ break;
+#ifdef MOZ_AV1
+ case NESTEGG_CODEC_AV1:
+ dimensions = AOMDecoder::GetFrameSize(sample);
+ break;
+#endif
+ }
+ if (mLastSeenFrameSize.isSome()
+ && (dimensions != mLastSeenFrameSize.value())) {
+ mInfo.mVideo.mDisplay = dimensions;
mSharedVideoTrackInfo = new SharedTrackInfo(mInfo.mVideo, ++sStreamSourceID);
}
- mLastSeenFrameWidth = Some(si.w);
- mLastSeenFrameHeight = Some(si.h);
+ mLastSeenFrameSize = Some(dimensions);
}
}
}
@@ -668,7 +717,7 @@ WebMDemuxer::GetNextPacket(TrackInfo::TrackType aType, MediaRawDataQueue *aSampl
RefPtr<MediaRawData> sample = new MediaRawData(data, length);
if (length && !sample->Data()) {
// OOM.
- return false;
+ return NS_ERROR_OUT_OF_MEMORY;
}
sample->mTimecode = tstamp;
sample->mTime = tstamp;
@@ -719,13 +768,19 @@ WebMDemuxer::GetNextPacket(TrackInfo::TrackType aType, MediaRawDataQueue *aSampl
if (aType == TrackInfo::kVideoTrack) {
sample->mTrackInfo = mSharedVideoTrackInfo;
}
+
+ if (mVideoCodec == NESTEGG_CODEC_AVC1) {
+ sample->mExtraData = mInfo.mVideo.mExtraData;
+ }
+
aSamples->Push(sample);
}
- return true;
+ return NS_OK;
}
-RefPtr<NesteggPacketHolder>
-WebMDemuxer::NextPacket(TrackInfo::TrackType aType)
+nsresult
+WebMDemuxer::NextPacket(TrackInfo::TrackType aType,
+ RefPtr<NesteggPacketHolder>& aPacket)
{
bool isVideo = aType == TrackInfo::kVideoTrack;
@@ -734,56 +789,64 @@ WebMDemuxer::NextPacket(TrackInfo::TrackType aType)
bool hasType = isVideo ? mHasVideo : mHasAudio;
if (!hasType) {
- return nullptr;
+ return NS_ERROR_DOM_MEDIA_DEMUXER_ERR;
}
// The packet queue for the type that we are interested in.
WebMPacketQueue &packets = isVideo ? mVideoPackets : mAudioPackets;
if (packets.GetSize() > 0) {
- return packets.PopFront();
+ aPacket = packets.PopFront();
+ return NS_OK;
}
// Track we are interested in
uint32_t ourTrack = isVideo ? mVideoTrack : mAudioTrack;
do {
- RefPtr<NesteggPacketHolder> holder = DemuxPacket(aType);
+ RefPtr<NesteggPacketHolder> holder;
+ nsresult rv = DemuxPacket(aType, holder);
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
if (!holder) {
- return nullptr;
+ return NS_ERROR_DOM_MEDIA_DEMUXER_ERR;
}
if (ourTrack == holder->Track()) {
- return holder;
+ aPacket = holder;
+ return NS_OK;
}
} while (true);
}
-RefPtr<NesteggPacketHolder>
-WebMDemuxer::DemuxPacket(TrackInfo::TrackType aType)
+nsresult
+WebMDemuxer::DemuxPacket(TrackInfo::TrackType aType,
+ RefPtr<NesteggPacketHolder>& aPacket)
{
nestegg_packet* packet;
int r = nestegg_read_packet(Context(aType), &packet);
if (r == 0) {
nestegg_read_reset(Context(aType));
- return nullptr;
+ return NS_ERROR_DOM_MEDIA_END_OF_STREAM;
} else if (r < 0) {
- return nullptr;
+ return NS_ERROR_DOM_MEDIA_DEMUXER_ERR;
}
unsigned int track = 0;
r = nestegg_packet_track(packet, &track);
if (r == -1) {
- return nullptr;
+ return NS_ERROR_DOM_MEDIA_DEMUXER_ERR;
}
int64_t offset = Resource(aType).Tell();
RefPtr<NesteggPacketHolder> holder = new NesteggPacketHolder();
if (!holder->Init(packet, offset, track, false)) {
- return nullptr;
+ return NS_ERROR_DOM_MEDIA_DEMUXER_ERR;
}
- return holder;
+ aPacket = holder;
+ return NS_OK;
}
void
@@ -943,7 +1006,14 @@ WebMTrackDemuxer::Seek(media::TimeUnit aTime)
media::TimeUnit seekTime = aTime;
mSamples.Reset();
mParent->SeekInternal(mType, aTime);
- mParent->GetNextPacket(mType, &mSamples);
+ nsresult rv = mParent->GetNextPacket(mType, &mSamples);
+ if (NS_FAILED(rv)) {
+ if (rv == NS_ERROR_DOM_MEDIA_END_OF_STREAM) {
+ // Ignore the error for now; the next GetSample will be rejected with EOS.
+ return SeekPromise::CreateAndResolve(media::TimeUnit(), __func__);
+ }
+ return SeekPromise::CreateAndReject(rv, __func__);
+ }
mNeedKeyframe = true;
// Check what time we actually seeked to.
@@ -956,15 +1026,18 @@ WebMTrackDemuxer::Seek(media::TimeUnit aTime)
return SeekPromise::CreateAndResolve(seekTime, __func__);
}
-RefPtr<MediaRawData>
-WebMTrackDemuxer::NextSample()
+nsresult
+WebMTrackDemuxer::NextSample(RefPtr<MediaRawData>& aData)
{
- while (mSamples.GetSize() < 1 && mParent->GetNextPacket(mType, &mSamples)) {
+ nsresult rv;
+ while (mSamples.GetSize() < 1 &&
+ NS_SUCCEEDED((rv = mParent->GetNextPacket(mType, &mSamples)))) {
}
if (mSamples.GetSize()) {
- return mSamples.PopFront();
+ aData = mSamples.PopFront();
+ return NS_OK;
}
- return nullptr;
+ return rv;
}
RefPtr<WebMTrackDemuxer::SamplesPromise>
@@ -973,9 +1046,12 @@ WebMTrackDemuxer::GetSamples(int32_t aNumSamples)
RefPtr<SamplesHolder> samples = new SamplesHolder;
MOZ_ASSERT(aNumSamples);
+ nsresult rv = NS_ERROR_DOM_MEDIA_END_OF_STREAM;
+
while (aNumSamples) {
- RefPtr<MediaRawData> sample(NextSample());
- if (!sample) {
+ RefPtr<MediaRawData> sample;
+ rv = NextSample(sample);
+ if (NS_FAILED(rv)) {
break;
}
if (mNeedKeyframe && !sample->mKeyframe) {
@@ -987,7 +1063,7 @@ WebMTrackDemuxer::GetSamples(int32_t aNumSamples)
}
if (samples->mSamples.IsEmpty()) {
- return SamplesPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_END_OF_STREAM, __func__);
+ return SamplesPromise::CreateAndReject(rv, __func__);
} else {
UpdateSamples(samples->mSamples);
return SamplesPromise::CreateAndResolve(samples, __func__);
@@ -1022,7 +1098,8 @@ WebMTrackDemuxer::SetNextKeyFrameTime()
}
// Demux and buffer frames until we find a keyframe.
RefPtr<MediaRawData> sample;
- while (!foundKeyframe && (sample = NextSample())) {
+ nsresult rv = NS_OK;
+ while (!foundKeyframe && NS_SUCCEEDED((rv = NextSample(sample)))) {
if (sample->mKeyframe) {
frameTime = sample->mTime;
foundKeyframe = true;
@@ -1104,10 +1181,11 @@ WebMTrackDemuxer::SkipToNextRandomAccessPoint(media::TimeUnit aTimeThreshold)
uint32_t parsed = 0;
bool found = false;
RefPtr<MediaRawData> sample;
+ nsresult rv = NS_OK;
int64_t sampleTime;
WEBM_DEBUG("TimeThreshold: %f", aTimeThreshold.ToSeconds());
- while (!found && (sample = NextSample())) {
+ while (!found && NS_SUCCEEDED((rv = NextSample(sample)))) {
parsed++;
sampleTime = sample->mTime;
if (sample->mKeyframe && sampleTime >= aTimeThreshold.ToMicroseconds()) {
@@ -1116,7 +1194,9 @@ WebMTrackDemuxer::SkipToNextRandomAccessPoint(media::TimeUnit aTimeThreshold)
mSamples.PushFront(sample.forget());
}
}
- SetNextKeyFrameTime();
+ if (NS_SUCCEEDED(rv)) {
+ SetNextKeyFrameTime();
+ }
if (found) {
WEBM_DEBUG("next sample: %f (parsed: %d)",
media::TimeUnit::FromMicroseconds(sampleTime).ToSeconds(),
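The demuxer changes above replace nullable/boolean returns with nsresult plus an out-parameter so callers can tell a benign end-of-stream apart from a hard demuxer error. A small generic sketch of that pattern, using plain C++ stand-ins for nsresult and the NS_ERROR_DOM_MEDIA_* codes, which are Gecko-specific:

#include <deque>
#include <memory>
#include <utility>

enum class DemuxResult { Ok, EndOfStream, Error };

struct Packet { /* payload elided */ };

// Pop the next packet. The return value carries the status; the packet
// itself travels through the out-parameter, mirroring the shape of
// WebMDemuxer::NextPacket(aType, aPacket) above.
static DemuxResult
NextPacket(std::deque<std::unique_ptr<Packet>>& aQueue,
           std::unique_ptr<Packet>& aOut)
{
  if (aQueue.empty()) {
    return DemuxResult::EndOfStream;  // benign: caller resolves/rejects with EOS
  }
  aOut = std::move(aQueue.front());
  aQueue.pop_front();
  return aOut ? DemuxResult::Ok : DemuxResult::Error;  // null entry = corrupt queue
}

Callers such as WebMTrackDemuxer::Seek() then treat EndOfStream as resolvable and only reject for real errors, a distinction the old boolean return could not express.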
diff --git a/dom/media/webm/WebMDemuxer.h b/dom/media/webm/WebMDemuxer.h
index 6fff38e7d..36d381e57 100644
--- a/dom/media/webm/WebMDemuxer.h
+++ b/dom/media/webm/WebMDemuxer.h
@@ -8,9 +8,13 @@
#include "nsTArray.h"
#include "MediaDataDemuxer.h"
+#include "MediaResource.h"
#include "NesteggPacketHolder.h"
#include "mozilla/Move.h"
+#include <deque>
+#include <stdint.h>
+
typedef struct nestegg nestegg;
namespace mozilla {
@@ -111,7 +115,8 @@ public:
bool GetOffsetForTime(uint64_t aTime, int64_t* aOffset);
// Demux next WebM packet and append samples to MediaRawDataQueue
- bool GetNextPacket(TrackInfo::TrackType aType, MediaRawDataQueue *aSamples);
+ nsresult GetNextPacket(TrackInfo::TrackType aType,
+ MediaRawDataQueue *aSamples);
nsresult Reset(TrackInfo::TrackType aType);
@@ -175,11 +180,13 @@ private:
// Read a packet from the nestegg file. Returns nullptr if all packets for
// the particular track have been read. Pass TrackInfo::kAudioTrack or
// TrackInfo::kVideoTrack to indicate the type of the packet we want to read.
- RefPtr<NesteggPacketHolder> NextPacket(TrackInfo::TrackType aType);
+ nsresult NextPacket(TrackInfo::TrackType aType,
+ RefPtr<NesteggPacketHolder>& aPacket);
// Internal method that demuxes the next packet from the stream. The caller
// is responsible for making sure it doesn't get lost.
- RefPtr<NesteggPacketHolder> DemuxPacket(TrackInfo::TrackType aType);
+ nsresult DemuxPacket(TrackInfo::TrackType aType,
+ RefPtr<NesteggPacketHolder>& aPacket);
// libnestegg audio and video context for webm container.
// Access on reader's thread only.
@@ -237,8 +244,7 @@ private:
int64_t mLastWebMBlockOffset;
const bool mIsMediaSource;
- Maybe<uint32_t> mLastSeenFrameWidth;
- Maybe<uint32_t> mLastSeenFrameHeight;
+ Maybe<nsIntSize> mLastSeenFrameSize;
// This will be populated only if a resolution change occurs, otherwise it
// will be left as null so the original metadata is used
RefPtr<SharedTrackInfo> mSharedVideoTrackInfo;
@@ -276,7 +282,7 @@ private:
~WebMTrackDemuxer();
void UpdateSamples(nsTArray<RefPtr<MediaRawData>>& aSamples);
void SetNextKeyFrameTime();
- RefPtr<MediaRawData> NextSample ();
+ nsresult NextSample(RefPtr<MediaRawData>& aData);
RefPtr<WebMDemuxer> mParent;
TrackInfo::TrackType mType;
UniquePtr<TrackInfo> mInfo;
diff --git a/dom/media/webrtc/RTCCertificate.cpp b/dom/media/webrtc/RTCCertificate.cpp
index 3f778bcbb..9f5e27c56 100644
--- a/dom/media/webrtc/RTCCertificate.cpp
+++ b/dom/media/webrtc/RTCCertificate.cpp
@@ -74,7 +74,7 @@ private:
char buf[sizeof(randomName) * 2 + 4];
PL_strncpy(buf, "CN=", 3);
for (size_t i = 0; i < sizeof(randomName); ++i) {
- snprintf(&buf[i * 2 + 3], 2, "%.2x", randomName[i]);
+ snprintf(&buf[i * 2 + 3], 3, "%.2x", randomName[i]);
}
buf[sizeof(buf) - 1] = '\0';