Diffstat (limited to 'dom/media')
-rw-r--r--  dom/media/DecoderTraits.cpp  110
-rw-r--r--  dom/media/GraphDriver.cpp  1
-rw-r--r--  dom/media/MP3FrameParser.cpp  591
-rw-r--r--  dom/media/MP3FrameParser.h  219
-rw-r--r--  dom/media/MediaDecoder.cpp  51
-rw-r--r--  dom/media/MediaDecoder.h  8
-rw-r--r--  dom/media/MediaDecoderStateMachine.cpp  20
-rw-r--r--  dom/media/MediaFormatReader.cpp  4
-rw-r--r--  dom/media/MediaManager.cpp  16
-rw-r--r--  dom/media/MediaPrefs.h  3
-rw-r--r--  dom/media/ThreadPoolCOMListener.h  4
-rw-r--r--  dom/media/WebVTTListener.h  1
-rw-r--r--  dom/media/android/AndroidMediaDecoder.cpp  25
-rw-r--r--  dom/media/android/AndroidMediaDecoder.h  28
-rw-r--r--  dom/media/android/AndroidMediaPluginHost.cpp  305
-rw-r--r--  dom/media/android/AndroidMediaPluginHost.h  41
-rw-r--r--  dom/media/android/AndroidMediaReader.cpp  449
-rw-r--r--  dom/media/android/AndroidMediaReader.h  75
-rw-r--r--  dom/media/android/AndroidMediaResourceServer.cpp  503
-rw-r--r--  dom/media/android/AndroidMediaResourceServer.h  96
-rw-r--r--  dom/media/android/MPAPI.h  165
-rw-r--r--  dom/media/android/moz.build  27
-rw-r--r--  dom/media/directshow/AudioSinkFilter.cpp  285
-rw-r--r--  dom/media/directshow/AudioSinkFilter.h  95
-rw-r--r--  dom/media/directshow/AudioSinkInputPin.cpp  195
-rw-r--r--  dom/media/directshow/AudioSinkInputPin.h  76
-rw-r--r--  dom/media/directshow/DirectShowDecoder.cpp  65
-rw-r--r--  dom/media/directshow/DirectShowDecoder.h  45
-rw-r--r--  dom/media/directshow/DirectShowReader.cpp  360
-rw-r--r--  dom/media/directshow/DirectShowReader.h  110
-rw-r--r--  dom/media/directshow/DirectShowUtils.cpp  369
-rw-r--r--  dom/media/directshow/DirectShowUtils.h  125
-rw-r--r--  dom/media/directshow/SampleSink.cpp  159
-rw-r--r--  dom/media/directshow/SampleSink.h  67
-rw-r--r--  dom/media/directshow/SourceFilter.cpp  683
-rw-r--r--  dom/media/directshow/SourceFilter.h  75
-rw-r--r--  dom/media/directshow/moz.build  41
-rw-r--r--  dom/media/fmp4/MP4Decoder.cpp  4
-rw-r--r--  dom/media/fmp4/MP4Demuxer.cpp  40
-rw-r--r--  dom/media/fmp4/MP4Stream.cpp  3
-rw-r--r--  dom/media/fmp4/moz.build  3
-rw-r--r--  dom/media/gtest/Cargo.toml  7
-rw-r--r--  dom/media/gtest/TestMP3Demuxer.cpp  1
-rw-r--r--  dom/media/gtest/TestMP4Reader.cpp  217
-rw-r--r--  dom/media/gtest/TestRust.cpp  9
-rw-r--r--  dom/media/gtest/hello.rs  6
-rw-r--r--  dom/media/gtest/moz.build  1
-rw-r--r--  dom/media/mediasource/moz.build  3
-rw-r--r--  dom/media/moz.build  18
-rw-r--r--  dom/media/mp3/MP3Decoder.cpp (renamed from dom/media/MP3Decoder.cpp)  2
-rw-r--r--  dom/media/mp3/MP3Decoder.h (renamed from dom/media/MP3Decoder.h)  0
-rw-r--r--  dom/media/mp3/MP3Demuxer.cpp (renamed from dom/media/MP3Demuxer.cpp)  2
-rw-r--r--  dom/media/mp3/MP3Demuxer.h (renamed from dom/media/MP3Demuxer.h)  2
-rw-r--r--  dom/media/mp3/moz.build  17
-rw-r--r--  dom/media/platforms/MediaTelemetryConstants.h  22
-rw-r--r--  dom/media/platforms/PDMFactory.cpp  9
-rw-r--r--  dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp  2
-rw-r--r--  dom/media/platforms/ffmpeg/FFmpegLibWrapper.h  4
-rw-r--r--  dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp  75
-rw-r--r--  dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h  1
-rw-r--r--  dom/media/platforms/moz.build  1
-rw-r--r--  dom/media/platforms/omx/OmxPlatformLayer.cpp  23
-rw-r--r--  dom/media/platforms/wmf/DXVA2Manager.cpp  1
-rw-r--r--  dom/media/platforms/wmf/WMFMediaDataDecoder.cpp  45
-rw-r--r--  dom/media/platforms/wmf/WMFMediaDataDecoder.h  6
-rw-r--r--  dom/media/platforms/wmf/WMFVideoMFTManager.cpp  1
-rw-r--r--  dom/media/test/crashtests/1228484.html  13
-rw-r--r--  dom/media/test/crashtests/crashtests.list  2
-rw-r--r--  dom/media/test/manifest.js  4
-rw-r--r--  dom/media/test/test_can_play_type_mpeg.html  3
-rwxr-xr-x  dom/media/webaudio/AudioContext.cpp  12
-rw-r--r--  dom/media/webaudio/AudioContext.h  6
72 files changed, 108 insertions, 5949 deletions
diff --git a/dom/media/DecoderTraits.cpp b/dom/media/DecoderTraits.cpp
index 56ebd9ce9..6aa44f3e5 100644
--- a/dom/media/DecoderTraits.cpp
+++ b/dom/media/DecoderTraits.cpp
@@ -10,7 +10,6 @@
#include "nsCharSeparatedTokenizer.h"
#include "nsMimeTypes.h"
#include "mozilla/Preferences.h"
-#include "mozilla/Telemetry.h"
#include "OggDecoder.h"
#include "OggDemuxer.h"
@@ -18,15 +17,6 @@
#include "WebMDecoder.h"
#include "WebMDemuxer.h"
-#ifdef MOZ_ANDROID_OMX
-#include "AndroidMediaDecoder.h"
-#include "AndroidMediaReader.h"
-#include "AndroidMediaPluginHost.h"
-#endif
-#ifdef MOZ_DIRECTSHOW
-#include "DirectShowDecoder.h"
-#include "DirectShowReader.h"
-#endif
#ifdef MOZ_FMP4
#include "MP4Decoder.h"
#include "MP4Demuxer.h"
@@ -94,45 +84,6 @@ DecoderTraits::IsWebMAudioType(const nsACString& aType)
return aType.EqualsASCII("audio/webm");
}
-static char const *const gHttpLiveStreamingTypes[] = {
- // For m3u8.
- // https://tools.ietf.org/html/draft-pantos-http-live-streaming-19#section-10
- "application/vnd.apple.mpegurl",
- // Some sites serve these as the informal m3u type.
- "application/x-mpegurl",
- "audio/x-mpegurl",
- nullptr
-};
-
-static bool
-IsHttpLiveStreamingType(const nsACString& aType)
-{
- return CodecListContains(gHttpLiveStreamingTypes, aType);
-}
-
-#ifdef MOZ_ANDROID_OMX
-static bool
-IsAndroidMediaType(const nsACString& aType)
-{
- if (!MediaDecoder::IsAndroidMediaPluginEnabled()) {
- return false;
- }
-
- static const char* supportedTypes[] = {
- "audio/mpeg", "audio/mp4", "video/mp4", "video/x-m4v", nullptr
- };
- return CodecListContains(supportedTypes, aType);
-}
-#endif
-
-#ifdef MOZ_DIRECTSHOW
-static bool
-IsDirectShowSupportedType(const nsACString& aType)
-{
- return DirectShowDecoder::GetSupportedCodecs(aType, nullptr);
-}
-#endif
-
#ifdef MOZ_FMP4
static bool
IsMP4SupportedType(const MediaContentType& aParsedType,
@@ -247,14 +198,6 @@ CanHandleCodecsType(const MediaContentType& aType,
if (IsFlacSupportedType(aType.GetMIMEType(), aType.GetCodecs())) {
return CANPLAY_YES;
}
-#ifdef MOZ_DIRECTSHOW
- DirectShowDecoder::GetSupportedCodecs(aType.GetMIMEType(), &codecList);
-#endif
-#ifdef MOZ_ANDROID_OMX
- if (MediaDecoder::IsAndroidMediaPluginEnabled()) {
- EnsureAndroidMediaPluginHost()->FindDecoder(aType.GetMIMEType(), &codecList);
- }
-#endif
if (!codecList) {
return CANPLAY_MAYBE;
}
@@ -287,10 +230,6 @@ CanHandleMediaType(const MediaContentType& aType,
{
MOZ_ASSERT(NS_IsMainThread());
- if (IsHttpLiveStreamingType(aType.GetMIMEType())) {
- /* Telemetry STUB */
- }
-
if (aType.HaveCodecs()) {
CanPlayStatus result = CanHandleCodecsType(aType, aDiagnostics);
if (result == CANPLAY_NO || result == CANPLAY_YES) {
@@ -320,17 +259,6 @@ CanHandleMediaType(const MediaContentType& aType,
if (IsFlacSupportedType(aType.GetMIMEType())) {
return CANPLAY_MAYBE;
}
-#ifdef MOZ_DIRECTSHOW
- if (DirectShowDecoder::GetSupportedCodecs(aType.GetMIMEType(), nullptr)) {
- return CANPLAY_MAYBE;
- }
-#endif
-#ifdef MOZ_ANDROID_OMX
- if (MediaDecoder::IsAndroidMediaPluginEnabled() &&
- EnsureAndroidMediaPluginHost()->FindDecoder(aType.GetMIMEType(), nullptr)) {
- return CANPLAY_MAYBE;
- }
-#endif
return CANPLAY_NO;
}
@@ -411,28 +339,12 @@ InstantiateDecoder(const nsACString& aType,
decoder = new FlacDecoder(aOwner);
return decoder.forget();
}
-#ifdef MOZ_ANDROID_OMX
- if (MediaDecoder::IsAndroidMediaPluginEnabled() &&
- EnsureAndroidMediaPluginHost()->FindDecoder(aType, nullptr)) {
- decoder = new AndroidMediaDecoder(aOwner, aType);
- return decoder.forget();
- }
-#endif
if (IsWebMSupportedType(aType)) {
decoder = new WebMDecoder(aOwner);
return decoder.forget();
}
-#ifdef MOZ_DIRECTSHOW
- // Note: DirectShow should come before WMF, so that we prefer DirectShow's
- // MP3 support over WMF's.
- if (IsDirectShowSupportedType(aType)) {
- decoder = new DirectShowDecoder(aOwner);
- return decoder.forget();
- }
-#endif
-
return nullptr;
}
@@ -461,7 +373,7 @@ MediaDecoderReader* DecoderTraits::CreateReader(const nsACString& aType, Abstrac
} else
#endif
if (IsMP3SupportedType(aType)) {
- decoderReader = new MediaFormatReader(aDecoder, new mp3::MP3Demuxer(aDecoder->GetResource()));
+ decoderReader = new MediaFormatReader(aDecoder, new MP3Demuxer(aDecoder->GetResource()));
} else
if (IsAACSupportedType(aType)) {
decoderReader = new MediaFormatReader(aDecoder, new ADTSDemuxer(aDecoder->GetResource()));
@@ -475,22 +387,10 @@ MediaDecoderReader* DecoderTraits::CreateReader(const nsACString& aType, Abstrac
if (IsOggSupportedType(aType)) {
decoderReader = new MediaFormatReader(aDecoder, new OggDemuxer(aDecoder->GetResource()));
} else
-#ifdef MOZ_ANDROID_OMX
- if (MediaDecoder::IsAndroidMediaPluginEnabled() &&
- EnsureAndroidMediaPluginHost()->FindDecoder(aType, nullptr)) {
- decoderReader = new AndroidMediaReader(aDecoder, aType);
- } else
-#endif
if (IsWebMSupportedType(aType)) {
decoderReader =
new MediaFormatReader(aDecoder, new WebMDemuxer(aDecoder->GetResource()));
- } else
-#ifdef MOZ_DIRECTSHOW
- if (IsDirectShowSupportedType(aType)) {
- decoderReader = new DirectShowReader(aDecoder);
- } else
-#endif
- if (false) {} // dummy if to take care of the dangling else
+ }
return decoderReader;
}
@@ -509,18 +409,12 @@ bool DecoderTraits::IsSupportedInVideoDocument(const nsACString& aType)
return
IsOggSupportedType(aType) ||
IsWebMSupportedType(aType) ||
-#ifdef MOZ_ANDROID_OMX
- (MediaDecoder::IsAndroidMediaPluginEnabled() && IsAndroidMediaType(aType)) ||
-#endif
#ifdef MOZ_FMP4
IsMP4SupportedType(aType, /* DecoderDoctorDiagnostics* */ nullptr) ||
#endif
IsMP3SupportedType(aType) ||
IsAACSupportedType(aType) ||
IsFlacSupportedType(aType) ||
-#ifdef MOZ_DIRECTSHOW
- IsDirectShowSupportedType(aType) ||
-#endif
false;
}
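
A note on the removed "if (false) {} // dummy if" line: CreateReader() is one long cascaded if/else chain in which the conditionally compiled branches end with "} else", so a dummy trailing branch was needed for the final else to attach to. With the DirectShow branch gone, the WebM branch ends in a plain "}" and the dummy can be dropped. A minimal standalone sketch of the pattern (Dispatch, OPTIONAL_FEATURE and the handler comments are illustrative names, not Gecko code):

    // Cascaded if/else with a conditionally compiled middle branch.
    void Dispatch(int aKind)
    {
      if (aKind == 1) {
        /* handle kind 1 */
      } else
    #ifdef OPTIONAL_FEATURE
      if (aKind == 2) {
        /* handle kind 2 */
      } else
    #endif
      // Every branch above ends with "} else", so without this dummy branch the
      // chain would not parse. Once the last branch ends with a plain "}", the
      // dummy can be removed, which is what the hunk above does.
      if (false) {}
    }
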
diff --git a/dom/media/GraphDriver.cpp b/dom/media/GraphDriver.cpp
index b60dfee9d..37c692a4b 100644
--- a/dom/media/GraphDriver.cpp
+++ b/dom/media/GraphDriver.cpp
@@ -629,7 +629,6 @@ AudioCallbackDriver::Init()
cubeb_stream_params output;
cubeb_stream_params input;
uint32_t latency_frames;
- bool firstStream = CubebUtils::GetFirstStream();
MOZ_ASSERT(!NS_IsMainThread(),
"This is blocking and should never run on the main thread.");
diff --git a/dom/media/MP3FrameParser.cpp b/dom/media/MP3FrameParser.cpp
deleted file mode 100644
index 242e3df00..000000000
--- a/dom/media/MP3FrameParser.cpp
+++ /dev/null
@@ -1,591 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include <algorithm>
-
-#include "nsMemory.h"
-#include "MP3FrameParser.h"
-#include "VideoUtils.h"
-
-
-#define FROM_BIG_ENDIAN(X) ((uint32_t)((uint8_t)(X)[0] << 24 | (uint8_t)(X)[1] << 16 | \
- (uint8_t)(X)[2] << 8 | (uint8_t)(X)[3]))
-
-
-namespace mozilla {
-
-/*
- * Following code taken from http://www.hydrogenaudio.org/forums/index.php?showtopic=85125
- * with permission from the author, Nick Wallette <sirnickity@gmail.com>.
- */
-
-/* BEGIN shameless copy and paste */
-
-// Bitrates - use [version][layer][bitrate]
-const uint16_t mpeg_bitrates[4][4][16] = {
- { // Version 2.5
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // Reserved
- { 0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160, 0 }, // Layer 3
- { 0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160, 0 }, // Layer 2
- { 0, 32, 48, 56, 64, 80, 96, 112, 128, 144, 160, 176, 192, 224, 256, 0 } // Layer 1
- },
- { // Reserved
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // Invalid
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // Invalid
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // Invalid
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } // Invalid
- },
- { // Version 2
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // Reserved
- { 0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160, 0 }, // Layer 3
- { 0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160, 0 }, // Layer 2
- { 0, 32, 48, 56, 64, 80, 96, 112, 128, 144, 160, 176, 192, 224, 256, 0 } // Layer 1
- },
- { // Version 1
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, // Reserved
- { 0, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 0 }, // Layer 3
- { 0, 32, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384, 0 }, // Layer 2
- { 0, 32, 64, 96, 128, 160, 192, 224, 256, 288, 320, 352, 384, 416, 448, 0 }, // Layer 1
- }
-};
-
-// Sample rates - use [version][srate]
-const uint16_t mpeg_srates[4][4] = {
- { 11025, 12000, 8000, 0 }, // MPEG 2.5
- { 0, 0, 0, 0 }, // Reserved
- { 22050, 24000, 16000, 0 }, // MPEG 2
- { 44100, 48000, 32000, 0 } // MPEG 1
-};
-
-// Samples per frame - use [version][layer]
-const uint16_t mpeg_frame_samples[4][4] = {
-// Rsvd 3 2 1 < Layer v Version
- { 0, 576, 1152, 384 }, // 2.5
- { 0, 0, 0, 0 }, // Reserved
- { 0, 576, 1152, 384 }, // 2
- { 0, 1152, 1152, 384 } // 1
-};
-
-// Slot size (MPEG unit of measurement) - use [layer]
-const uint8_t mpeg_slot_size[4] = { 0, 1, 1, 4 }; // Rsvd, 3, 2, 1
-
-uint16_t
-MP3Frame::CalculateLength()
-{
- // Lookup real values of these fields
- uint32_t bitrate = mpeg_bitrates[mVersion][mLayer][mBitrate] * 1000;
- uint32_t samprate = mpeg_srates[mVersion][mSampleRate];
- uint16_t samples = mpeg_frame_samples[mVersion][mLayer];
- uint8_t slot_size = mpeg_slot_size[mLayer];
-
- // In-between calculations
- float bps = (float)samples / 8.0;
- float fsize = ( (bps * (float)bitrate) / (float)samprate )
- + ( (mPad) ? slot_size : 0 );
-
- // Frame sizes are truncated integers
- return (uint16_t)fsize;
-}
-
-/* END shameless copy and paste */
-
-
-/** MP3Parser methods **/
-
-MP3Parser::MP3Parser()
- : mCurrentChar(0)
-{ }
-
-void
-MP3Parser::Reset()
-{
- mCurrentChar = 0;
-}
-
-uint16_t
-MP3Parser::ParseFrameLength(uint8_t ch)
-{
- mData.mRaw[mCurrentChar] = ch;
-
- MP3Frame &frame = mData.mFrame;
-
- // Validate MP3 header as we read. We can't mistake the start of an MP3 frame
- // for the middle of another frame due to the sync byte at the beginning
- // of the frame.
-
- // The only valid position for an all-high byte is the sync byte at the
- // beginning of the frame.
- if (ch == 0xff) {
- mCurrentChar = 0;
- }
-
- // Make sure the current byte is valid in context. If not, reset the parser.
- if (mCurrentChar == 2) {
- if (frame.mBitrate == 0x0f) {
- goto fail;
- }
- } else if (mCurrentChar == 1) {
- if (frame.mSync2 != 0x07
- || frame.mVersion == 0x01
- || frame.mLayer == 0x00) {
- goto fail;
- }
- }
-
- // The only valid character at the beginning of the header is 0xff. Fail if
- // it's different.
- if (mCurrentChar == 0 && frame.mSync1 != 0xff) {
- // Couldn't find the sync byte. Fail.
- return 0;
- }
-
- mCurrentChar++;
- MOZ_ASSERT(mCurrentChar <= sizeof(MP3Frame));
-
- // Don't have a full header yet.
- if (mCurrentChar < sizeof(MP3Frame)) {
- return 0;
- }
-
- // Woo, valid header. Return the length.
- mCurrentChar = 0;
- return frame.CalculateLength();
-
-fail:
- Reset();
- return 0;
-}
-
-uint32_t
-MP3Parser::GetSampleRate()
-{
- MP3Frame &frame = mData.mFrame;
- return mpeg_srates[frame.mVersion][frame.mSampleRate];
-}
-
-uint32_t
-MP3Parser::GetSamplesPerFrame()
-{
- MP3Frame &frame = mData.mFrame;
- return mpeg_frame_samples[frame.mVersion][frame.mLayer];
-}
-
-
-/** ID3Parser methods **/
-
-const char sID3Head[3] = { 'I', 'D', '3' };
-const uint32_t ID3_HEADER_LENGTH = 10;
-const uint32_t ID3_FOOTER_LENGTH = 10;
-const uint8_t ID3_FOOTER_PRESENT = 0x10;
-
-ID3Parser::ID3Parser()
- : mCurrentChar(0)
- , mVersion(0)
- , mFlags(0)
- , mHeaderLength(0)
-{ }
-
-void
-ID3Parser::Reset()
-{
- mCurrentChar = mVersion = mFlags = mHeaderLength = 0;
-}
-
-bool
-ID3Parser::ParseChar(char ch)
-{
- switch (mCurrentChar) {
- // The first three bytes of an ID3v2 header must match the string "ID3".
- case 0: case 1: case 2:
- if (ch != sID3Head[mCurrentChar]) {
- goto fail;
- }
- break;
- // The fourth and fifth bytes give the version, between 2 and 4.
- case 3:
- if (ch < '\2' || ch > '\4') {
- goto fail;
- }
- mVersion = uint8_t(ch);
- break;
- case 4:
- if (ch != '\0') {
- goto fail;
- }
- break;
- // The sixth byte gives the flags; valid flags depend on the version.
- case 5:
- if ((ch & (0xff >> mVersion)) != '\0') {
- goto fail;
- }
- mFlags = uint8_t(ch);
- break;
- // Bytes seven through ten give the sum of the byte length of the extended
- // header, the padding and the frames after unsynchronisation.
- // These bytes form a 28-bit integer, with the high bit of each byte unset.
- case 6: case 7: case 8: case 9:
- if (ch & 0x80) {
- goto fail;
- }
- mHeaderLength <<= 7;
- mHeaderLength |= ch;
- if (mCurrentChar == 9) {
- mHeaderLength += ID3_HEADER_LENGTH;
- mHeaderLength += (mFlags & ID3_FOOTER_PRESENT) ? ID3_FOOTER_LENGTH : 0;
- }
- break;
- default:
- MOZ_CRASH("Header already fully parsed!");
- }
-
- mCurrentChar++;
-
- return IsParsed();
-
-fail:
- if (mCurrentChar) {
- Reset();
- return ParseChar(ch);
- }
- Reset();
- return false;
-}
-
-bool
-ID3Parser::IsParsed() const
-{
- return mCurrentChar >= ID3_HEADER_LENGTH;
-}
-
-uint32_t
-ID3Parser::GetHeaderLength() const
-{
- MOZ_ASSERT(IsParsed(),
- "Queried length of ID3 header before parsing finished.");
- return mHeaderLength;
-}
-
-
-/** VBR header helper stuff **/
-
-// Helper function to find a VBR header in an MP3 frame.
-// Based on information from
-// http://www.codeproject.com/Articles/8295/MPEG-Audio-Frame-Header
-
-const uint32_t VBRI_TAG = FROM_BIG_ENDIAN("VBRI");
-const uint32_t VBRI_OFFSET = 32 - sizeof(MP3Frame);
-const uint32_t VBRI_FRAME_COUNT_OFFSET = VBRI_OFFSET + 14;
-const uint32_t VBRI_MIN_FRAME_SIZE = VBRI_OFFSET + 26;
-
-const uint32_t XING_TAG = FROM_BIG_ENDIAN("Xing");
-enum XingFlags {
- XING_HAS_NUM_FRAMES = 0x01,
- XING_HAS_NUM_BYTES = 0x02,
- XING_HAS_TOC = 0x04,
- XING_HAS_VBR_SCALE = 0x08
-};
-
-static int64_t
-ParseXing(const char *aBuffer)
-{
- uint32_t flags = FROM_BIG_ENDIAN(aBuffer + 4);
-
- if (!(flags & XING_HAS_NUM_FRAMES)) {
- NS_WARNING("VBR file without frame count. Duration estimation likely to "
- "be totally wrong.");
- return -1;
- }
-
- int64_t numFrames = -1;
- if (flags & XING_HAS_NUM_FRAMES) {
- numFrames = FROM_BIG_ENDIAN(aBuffer + 8);
- }
-
- return numFrames;
-}
-
-static int64_t
-FindNumVBRFrames(const nsCString& aFrame)
-{
- const char *buffer = aFrame.get();
- const char *bufferEnd = aFrame.get() + aFrame.Length();
-
- // VBRI header is nice and well-defined; let's try to find that first.
- if (aFrame.Length() > VBRI_MIN_FRAME_SIZE &&
- FROM_BIG_ENDIAN(buffer + VBRI_OFFSET) == VBRI_TAG) {
- return FROM_BIG_ENDIAN(buffer + VBRI_FRAME_COUNT_OFFSET);
- }
-
- // We have to search for the Xing header as its position can change.
- for (; buffer + sizeof(XING_TAG) < bufferEnd; buffer++) {
- if (FROM_BIG_ENDIAN(buffer) == XING_TAG) {
- return ParseXing(buffer);
- }
- }
-
- return -1;
-}
-
-
-/** MP3FrameParser methods **/
-
-// Some MP3s have large ID3v2 tags, up to 150KB, so we allow lots of
-// skipped bytes to be read, just in case, before we give up and assume
-// we're not parsing an MP3 stream.
-static const uint32_t MAX_SKIPPED_BYTES = 4096;
-
-enum {
- MP3_HEADER_LENGTH = 4,
-};
-
-MP3FrameParser::MP3FrameParser(int64_t aLength)
-: mLock("MP3FrameParser.mLock"),
- mTotalID3Size(0),
- mTotalFrameSize(0),
- mFrameCount(0),
- mOffset(0),
- mLength(aLength),
- mMP3Offset(-1),
- mSamplesPerSecond(0),
- mFirstFrameEnd(-1),
- mIsMP3(MAYBE_MP3)
-{ }
-
-nsresult MP3FrameParser::ParseBuffer(const uint8_t* aBuffer,
- uint32_t aLength,
- int64_t aStreamOffset,
- uint32_t* aOutBytesRead)
-{
- // Iterate forwards over the buffer, looking for ID3 tag, or MP3
- // Frame headers.
- const uint8_t *buffer = aBuffer;
- const uint8_t *bufferEnd = aBuffer + aLength;
-
- // If we haven't found any MP3 frame data yet, there might be ID3 headers
- // we can skip over.
- if (mMP3Offset < 0) {
- for (const uint8_t *ch = buffer; ch < bufferEnd; ch++) {
- if (mID3Parser.ParseChar(*ch)) {
- // Found an ID3 header. We don't care about the body of the header, so
- // just skip past.
- buffer = ch + mID3Parser.GetHeaderLength() - (ID3_HEADER_LENGTH - 1);
-
- if (buffer <= ch) {
- return NS_ERROR_FAILURE;
- }
-
- ch = buffer;
-
- mTotalID3Size += mID3Parser.GetHeaderLength();
-
- // Yes, this is an MP3!
- mIsMP3 = DEFINITELY_MP3;
-
- mID3Parser.Reset();
- }
- }
- }
-
- // The first MP3 frame in a variable bitrate stream can contain metadata
- // for duration estimation and seeking, so we buffer that first frame here.
- if (aStreamOffset < mFirstFrameEnd) {
- uint64_t copyLen = std::min((int64_t)aLength, mFirstFrameEnd - aStreamOffset);
- mFirstFrame.Append((const char *)buffer, copyLen);
- buffer += copyLen;
- }
-
- while (buffer < bufferEnd) {
- uint16_t frameLen = mMP3Parser.ParseFrameLength(*buffer);
-
- if (frameLen) {
- // We've found an MP3 frame!
- // This is the first frame (and the only one we'll bother parsing), so:
- // * Mark this stream as MP3;
- // * Store the offset at which the MP3 data started; and
- // * Start buffering the frame, as it might contain handy metadata.
-
- // We're now sure this is an MP3 stream.
- mIsMP3 = DEFINITELY_MP3;
-
- // We need to know these to convert the number of frames in the stream
- // to the length of the stream in seconds.
- mSamplesPerSecond = mMP3Parser.GetSampleRate();
- mSamplesPerFrame = mMP3Parser.GetSamplesPerFrame();
-
- // If the stream has a constant bitrate, we should only need the length
- // of the first frame and the length (in bytes) of the stream to
- // estimate the length (in seconds).
- mTotalFrameSize += frameLen;
- mFrameCount++;
-
- // If |mMP3Offset| isn't set then this is the first MP3 frame we have
- // seen in the stream, which is useful for duration estimation.
- if (mMP3Offset > -1) {
- uint16_t skip = frameLen - sizeof(MP3Frame);
- buffer += skip ? skip : 1;
- continue;
- }
-
- // Remember the offset of the MP3 stream.
- // We're at the last byte of an MP3Frame, so MP3 data started
- // sizeof(MP3Frame) - 1 bytes ago.
- mMP3Offset = aStreamOffset
- + (buffer - aBuffer)
- - (sizeof(MP3Frame) - 1);
-
- buffer++;
-
- // If the stream has a variable bitrate, the first frame has metadata
- // we need for duration estimation and seeking. Start buffering it so we
- // can parse it later.
- mFirstFrameEnd = mMP3Offset + frameLen;
- uint64_t currOffset = buffer - aBuffer + aStreamOffset;
- uint64_t copyLen = std::min(mFirstFrameEnd - currOffset,
- (uint64_t)(bufferEnd - buffer));
- mFirstFrame.Append((const char *)buffer, copyLen);
-
- buffer += copyLen;
-
- } else {
- // Nothing to see here. Move along.
- buffer++;
- }
- }
-
- *aOutBytesRead = buffer - aBuffer;
-
- if (mFirstFrameEnd > -1 && mFirstFrameEnd <= aStreamOffset + buffer - aBuffer) {
- // We have our whole first frame. Try to find a VBR header.
- mNumFrames = FindNumVBRFrames(mFirstFrame);
- mFirstFrameEnd = -1;
- }
-
- return NS_OK;
-}
-
-void MP3FrameParser::Parse(const uint8_t* aBuffer, uint32_t aLength, uint64_t aOffset)
-{
- MutexAutoLock mon(mLock);
-
- if (HasExactDuration()) {
- // We know the duration; nothing to do here.
- return;
- }
-
- const uint8_t* buffer = aBuffer;
- int32_t length = aLength;
- uint64_t offset = aOffset;
-
- // Got some data we have seen already. Skip forward to what we need.
- if (aOffset < mOffset) {
- buffer += mOffset - aOffset;
- length -= mOffset - aOffset;
- offset = mOffset;
-
- if (length <= 0) {
- return;
- }
- }
-
- // If there is a discontinuity in the input stream, reset the state of the
- // parsers so we don't get any partial headers.
- if (mOffset < aOffset) {
- if (!mID3Parser.IsParsed()) {
- // Only reset this if it hasn't finished yet.
- mID3Parser.Reset();
- }
-
- if (mFirstFrameEnd > -1) {
- NS_WARNING("Discontinuity in input while buffering first frame.");
- mFirstFrameEnd = -1;
- }
-
- mMP3Parser.Reset();
- }
-
- uint32_t bytesRead = 0;
- if (NS_FAILED(ParseBuffer(buffer,
- length,
- offset,
- &bytesRead))) {
- return;
- }
-
- MOZ_ASSERT(length <= (int)bytesRead, "All bytes should have been consumed");
-
- // Update next data offset
- mOffset = offset + bytesRead;
-
- // If we've parsed lots of data and we still have nothing, just give up.
- // We don't count ID3 headers towards the skipped bytes count, as MP3 files
- // can have massive ID3 sections.
- if (!mID3Parser.IsParsed() && mMP3Offset < 0 &&
- mOffset - mTotalID3Size > MAX_SKIPPED_BYTES) {
- mIsMP3 = NOT_MP3;
- }
-}
-
-int64_t MP3FrameParser::GetDuration()
-{
- MutexAutoLock mon(mLock);
-
- if (!ParsedHeaders() || !mSamplesPerSecond) {
- // Not a single frame decoded yet.
- return -1;
- }
-
- MOZ_ASSERT(mFrameCount > 0 && mTotalFrameSize > 0,
- "Frame parser should have seen at least one MP3 frame of positive length.");
-
- if (!mFrameCount || !mTotalFrameSize) {
- // This should never happen.
- return -1;
- }
-
- double frames;
- if (mNumFrames < 0) {
- // Estimate the number of frames in the stream based on the average frame
- // size and the length of the MP3 file.
- double frameSize = (double)mTotalFrameSize / mFrameCount;
- frames = (double)(mLength - mMP3Offset) / frameSize;
- } else {
- // We know the exact number of frames from the VBR header.
- frames = mNumFrames;
- }
-
- // The duration of each frame is constant over a given stream.
- double usPerFrame = USECS_PER_S * mSamplesPerFrame / mSamplesPerSecond;
-
- return frames * usPerFrame;
-}
-
-int64_t MP3FrameParser::GetMP3Offset()
-{
- MutexAutoLock mon(mLock);
- return mMP3Offset;
-}
-
-bool MP3FrameParser::ParsedHeaders()
-{
- // We have seen both the beginning and the end of the first MP3 frame in the
- // stream.
- return mMP3Offset > -1 && mFirstFrameEnd < 0;
-}
-
-bool MP3FrameParser::HasExactDuration()
-{
- return ParsedHeaders() && mNumFrames > -1;
-}
-
-bool MP3FrameParser::NeedsData()
-{
- // If we don't know the duration exactly then either:
- // - we're still waiting for a VBR header; or
- // - we look at all frames to constantly update our duration estimate.
- return IsMP3() && !HasExactDuration();
-}
-
-} // namespace mozilla
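
To make the CalculateLength() arithmetic in the deleted parser concrete: the frame length in bytes is samplesPerFrame / 8 * bitrate / sampleRate, truncated, plus one slot when the padding bit is set. Below is a small standalone sketch with a worked example; FrameLengthBytes is an illustrative helper that takes the table lookups as explicit arguments instead of reading the mpeg_* arrays:

    #include <cstdint>
    #include <cstdio>

    // Same formula as MP3Frame::CalculateLength(), with the bitrate, sample-rate
    // and samples-per-frame table lookups passed in as plain arguments.
    static uint16_t FrameLengthBytes(uint16_t aSamplesPerFrame, uint32_t aBitrate,
                                     uint32_t aSampleRate, bool aPadded,
                                     uint8_t aSlotSize)
    {
      float bps = aSamplesPerFrame / 8.0f;            // e.g. 1152 / 8 = 144
      float fsize = (bps * aBitrate) / aSampleRate    // 144 * 128000 / 44100 ~ 417.96
                    + (aPadded ? aSlotSize : 0);
      return (uint16_t)fsize;                         // frame sizes are truncated
    }

    int main()
    {
      // MPEG-1 Layer 3 at 128 kbps / 44.1 kHz: 417 bytes, or 418 with the pad bit set.
      std::printf("%u %u\n",
                  (unsigned)FrameLengthBytes(1152, 128000, 44100, false, 1),
                  (unsigned)FrameLengthBytes(1152, 128000, 44100, true, 1));
      return 0;
    }
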
diff --git a/dom/media/MP3FrameParser.h b/dom/media/MP3FrameParser.h
deleted file mode 100644
index d2ba791fd..000000000
--- a/dom/media/MP3FrameParser.h
+++ /dev/null
@@ -1,219 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#ifndef MP3FrameParser_h
-#define MP3FrameParser_h
-
-#include <stdint.h>
-
-#include "mozilla/Mutex.h"
-#include "nsString.h"
-#include "Intervals.h"
-
-namespace mozilla {
-
-// Simple parser to tell whether we've found an ID3 header and how long it is,
-// so that we can skip it.
-// XXX maybe actually parse this stuff?
-class ID3Parser
-{
-public:
- ID3Parser();
-
- void Reset();
- bool ParseChar(char ch);
- bool IsParsed() const;
- uint32_t GetHeaderLength() const;
-
-private:
- uint32_t mCurrentChar;
- uint8_t mVersion;
- uint8_t mFlags;
- uint32_t mHeaderLength;
-};
-
-struct MP3Frame {
- uint16_t mSync1 : 8; // Always all set
- uint16_t mProtected : 1; // Ignored
- uint16_t mLayer : 2;
- uint16_t mVersion : 2;
- uint16_t mSync2 : 3; // Always all set
- uint16_t mPrivate : 1; // Ignored
- uint16_t mPad : 1;
- uint16_t mSampleRate : 2; // Index into mpeg_srates above
- uint16_t mBitrate : 4; // Index into mpeg_bitrates above
-
- uint16_t CalculateLength();
-};
-
-// Buffering parser for MP3 frames.
-class MP3Parser
-{
-public:
- MP3Parser();
-
- // Forget all data the parser has seen so far.
- void Reset();
-
- // Parse the given byte. If we have found a frame header, return the length of
- // the frame.
- uint16_t ParseFrameLength(uint8_t ch);
-
- // Get the sample rate from the current header.
- uint32_t GetSampleRate();
-
- // Get the number of samples per frame.
- uint32_t GetSamplesPerFrame();
-
-private:
- uint32_t mCurrentChar;
- union {
- uint8_t mRaw[3];
- MP3Frame mFrame;
- } mData;
-};
-
-
-// A description of the MP3 format and its extensions is available at
-//
-// http://www.codeproject.com/Articles/8295/MPEG-Audio-Frame-Header
-//
-// The data in MP3 streams is split into small frames, with each frame
-// containing a fixed number of samples. The duration of a frame depends
-// on the frame's bit rate and sample rate. Both values can vary among
-// frames, so it is necessary to examine each individual frame of an MP3
-// stream to calculate the stream's overall duration.
-//
-// The MP3 frame parser extracts information from an MP3 data stream. It
-// accepts a range of frames of an MP3 stream as input, and parses all
-// frames for their duration. Callers can query the stream's overall
-// duration from the parser.
-//
-// Call the methods NotifyDataArrived or Parse to add new data. If you added
-// information for a certain stream position, you cannot go back to previous
-// positions. The parser will simply ignore the input. If you skip stream
-// positions, the duration of the related MP3 frames will be estimated from
-// the stream's average.
-//
-// The method GetDuration returns calculated duration of the stream, including
-// estimates for skipped ranges.
-//
-// All public methods are thread-safe.
-
-class MP3FrameParser
-{
-public:
- explicit MP3FrameParser(int64_t aLength=-1);
-
- bool IsMP3() {
- MutexAutoLock mon(mLock);
- return mIsMP3 != NOT_MP3;
- }
-
- void Parse(const uint8_t* aBuffer, uint32_t aLength, uint64_t aStreamOffset);
-
- // Returns the duration, in microseconds. If the entire stream has not
- // been parsed yet, this is an estimate based on the bitrate of the
- // frames parsed so far.
- int64_t GetDuration();
-
- // Returns the offset of the first MP3 frame in the stream, or -1 if
- // no MP3 frame has been detected yet.
- int64_t GetMP3Offset();
-
- // Returns true if we've seen the whole first frame of the MP3 stream, and
- // therefore can make an estimate on the stream duration.
- // Otherwise, returns false.
- bool ParsedHeaders();
-
- // Returns true if we know the exact duration of the MP3 stream;
- // false otherwise.
- bool HasExactDuration();
-
- // Returns true if the parser needs more data for duration estimation.
- bool NeedsData();
- // Assign the total length of this MP3 stream.
- void SetLength(int64_t aLength) {
- MutexAutoLock mon(mLock);
- mLength = aLength;
- }
-private:
-
- // Parses aBuffer, starting at offset 0. Returns the number of bytes
- // parsed, relative to the start of the buffer. Note this may be
- // greater than aLength if the headers in the buffer indicate that
- // the frame or ID3 tag extends outside of aBuffer. Returns failure
- // if too many non-MP3 bytes are parsed.
- nsresult ParseBuffer(const uint8_t* aBuffer,
- uint32_t aLength,
- int64_t aStreamOffset,
- uint32_t* aOutBytesRead);
-
- // A low-contention lock for protecting the parser results
- Mutex mLock;
-
- // ID3 header parser. Keeps state between reads in case the header falls
- // in between.
- ID3Parser mID3Parser;
-
- // MP3 frame header parser.
- MP3Parser mMP3Parser;
-
- // If we read |MAX_SKIPPED_BYTES| from the stream without finding any MP3
- // frames, we give up and report |NOT_MP3|. Here we track the cumulative size
- // of any ID3 headers we've seen so big ID3 sections aren't counted towards
- // skipped bytes.
- uint32_t mTotalID3Size;
-
- // All fields below are protected by mLock
-
- // We keep stats on the size of all the frames we've seen, as well as how many,
- // so that we can estimate the duration of the rest of the stream.
- uint64_t mTotalFrameSize;
- uint64_t mFrameCount;
-
- // Offset of the last data parsed. This is the end offset of the last data
- // block parsed, so it's the start offset we expect to get on the next
- // call to Parse().
- uint64_t mOffset;
-
- // Total length of the stream in bytes.
- int64_t mLength;
-
- // Offset of first MP3 frame in the bitstream. Has value -1 until the
- // first MP3 frame is found.
- int64_t mMP3Offset;
-
- // The exact number of frames in this stream, if we know it. -1 otherwise.
- int64_t mNumFrames;
-
- // Number of audio samples per second and per frame. Fixed through the whole
- // file. If we know these variables as well as the number of frames in the
- // file, we can get an exact duration for the stream.
- uint16_t mSamplesPerSecond;
- uint16_t mSamplesPerFrame;
-
- // If the MP3 has a variable bitrate, then there *should* be metadata about
- // the encoding in the first frame. We buffer the first frame here.
- nsCString mFirstFrame;
-
- // While we are reading the first frame, this is the stream offset of the
- // last byte of that frame. -1 at all other times.
- int64_t mFirstFrameEnd;
-
- enum eIsMP3 {
- MAYBE_MP3, // We're giving the stream the benefit of the doubt...
- DEFINITELY_MP3, // We've hit at least one ID3 tag or MP3 frame.
- NOT_MP3 // No evidence found yet that the stream is MP3.
- };
-
- eIsMP3 mIsMP3;
-
-};
-
-} // namespace mozilla
-
-#endif
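
As a usage illustration of the contract spelled out in the header comment above (feed data forward-only through Parse(), then query the running duration estimate), a hypothetical caller could look like the sketch below; EstimateDurationUs and the 4 KiB chunking loop are illustrative, not code that existed in the tree:

    #include <algorithm>
    #include <cstdint>
    #include "MP3FrameParser.h"

    // Feed an in-memory stream to the parser in chunks and return its duration
    // estimate in microseconds, or -1 if the data does not look like MP3.
    static int64_t EstimateDurationUs(const uint8_t* aData, uint64_t aSize,
                                      int64_t aStreamLength)
    {
      mozilla::MP3FrameParser parser(aStreamLength);
      const uint64_t kChunkSize = 4096;
      uint64_t offset = 0;
      while (offset < aSize && parser.NeedsData()) {
        uint32_t length = (uint32_t)std::min(kChunkSize, aSize - offset);
        // Offsets must never move backwards; skipped ranges are estimated
        // from the average frame size seen so far.
        parser.Parse(aData + offset, length, offset);
        offset += length;
      }
      return parser.IsMP3() ? parser.GetDuration() : -1;
    }
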
diff --git a/dom/media/MediaDecoder.cpp b/dom/media/MediaDecoder.cpp
index d027818de..223c59c3b 100644
--- a/dom/media/MediaDecoder.cpp
+++ b/dom/media/MediaDecoder.cpp
@@ -30,15 +30,10 @@
#include "mozilla/dom/VideoTrack.h"
#include "mozilla/dom/VideoTrackList.h"
#include "nsPrintfCString.h"
-#include "mozilla/Telemetry.h"
#include "GMPService.h"
#include "Layers.h"
#include "mozilla/layers/ShadowLayers.h"
-#ifdef MOZ_ANDROID_OMX
-#include "AndroidBridge.h"
-#endif
-
using namespace mozilla::dom;
using namespace mozilla::layers;
using namespace mozilla::media;
@@ -425,7 +420,6 @@ MediaDecoder::MediaDecoder(MediaDecoderOwner* aOwner)
, INIT_CANONICAL(mPlaybackRateReliable, true)
, INIT_CANONICAL(mDecoderPosition, 0)
, INIT_CANONICAL(mIsVisible, !aOwner->IsHidden())
- , mTelemetryReported(false)
{
MOZ_COUNT_CTOR(MediaDecoder);
MOZ_ASSERT(NS_IsMainThread());
@@ -845,41 +839,6 @@ MediaDecoder::MetadataLoaded(nsAutoPtr<MediaInfo> aInfo,
// So we call Invalidate() after calling mOwner->MetadataLoaded to ensure
// the media element has the latest dimensions.
Invalidate();
-
- EnsureTelemetryReported();
-}
-
-void
-MediaDecoder::EnsureTelemetryReported()
-{
- MOZ_ASSERT(NS_IsMainThread());
-
- if (mTelemetryReported || !mInfo) {
- // Note: sometimes we get multiple MetadataLoaded calls (for example
- // for chained ogg). So we ensure we don't report duplicate results for
- // these resources.
- return;
- }
-
- nsTArray<nsCString> codecs;
- if (mInfo->HasAudio() && !mInfo->mAudio.GetAsAudioInfo()->mMimeType.IsEmpty()) {
- codecs.AppendElement(mInfo->mAudio.GetAsAudioInfo()->mMimeType);
- }
- if (mInfo->HasVideo() && !mInfo->mVideo.GetAsVideoInfo()->mMimeType.IsEmpty()) {
- codecs.AppendElement(mInfo->mVideo.GetAsVideoInfo()->mMimeType);
- }
- if (codecs.IsEmpty()) {
- if (mResource->GetContentType().IsEmpty()) {
- NS_WARNING("Somehow the resource's content type is empty");
- return;
- }
- codecs.AppendElement(nsPrintfCString("resource; %s", mResource->GetContentType().get()));
- }
- for (const nsCString& codec : codecs) {
- DECODER_LOG("Telemetry MEDIA_CODEC_USED= '%s'", codec.get());
- }
-
- mTelemetryReported = true;
}
const char*
@@ -1617,16 +1576,6 @@ MediaDecoder::IsWebMEnabled()
return Preferences::GetBool("media.webm.enabled");
}
-#ifdef MOZ_ANDROID_OMX
-bool
-MediaDecoder::IsAndroidMediaPluginEnabled()
-{
- return AndroidBridge::Bridge() &&
- AndroidBridge::Bridge()->GetAPIVersion() < 16 &&
- Preferences::GetBool("media.plugins.enabled");
-}
-#endif
-
NS_IMETHODIMP
MediaMemoryTracker::CollectReports(nsIHandleReportCallback* aHandleReport,
nsISupports* aData, bool aAnonymize)
diff --git a/dom/media/MediaDecoder.h b/dom/media/MediaDecoder.h
index a4edcbe72..05e88db8b 100644
--- a/dom/media/MediaDecoder.h
+++ b/dom/media/MediaDecoder.h
@@ -440,17 +440,11 @@ private:
void SetCDMProxy(CDMProxy* aProxy);
- void EnsureTelemetryReported();
-
static bool IsOggEnabled();
static bool IsOpusEnabled();
static bool IsWaveEnabled();
static bool IsWebMEnabled();
-#ifdef MOZ_ANDROID_OMX
- static bool IsAndroidMediaPluginEnabled();
-#endif
-
#ifdef MOZ_WMF
static bool IsWMFEnabled();
#endif
@@ -856,8 +850,6 @@ private:
// download has ended. Called on the main thread only. aStatus is
// the result from OnStopRequest.
void NotifyDownloadEnded(nsresult aStatus);
-
- bool mTelemetryReported;
};
} // namespace mozilla
diff --git a/dom/media/MediaDecoderStateMachine.cpp b/dom/media/MediaDecoderStateMachine.cpp
index 5bc1d95ef..2ed1956c9 100644
--- a/dom/media/MediaDecoderStateMachine.cpp
+++ b/dom/media/MediaDecoderStateMachine.cpp
@@ -1173,27 +1173,12 @@ StateObject::HandleShutdown()
return SetState<ShutdownState>();
}
-static void
-ReportRecoveryTelemetry(const TimeStamp& aRecoveryStart,
- const MediaInfo& aMediaInfo,
- bool aIsHardwareAccelerated)
-{
-/* STUB */
-}
-
void
MediaDecoderStateMachine::
StateObject::HandleResumeVideoDecoding()
{
MOZ_ASSERT(mMaster->mVideoDecodeSuspended);
- // Start counting recovery time from right now.
- TimeStamp start = TimeStamp::Now();
-
- // Local reference to mInfo, so that it will be copied in the lambda below.
- auto& info = Info();
- bool hw = Reader()->VideoIsHardwareAccelerated();
-
// Start video-only seek to the current time.
SeekJob seekJob;
@@ -1205,10 +1190,7 @@ StateObject::HandleResumeVideoDecoding()
type,
true /* aVideoOnly */);
- SetState<SeekingState>(Move(seekJob), EventVisibility::Suppressed)->Then(
- AbstractThread::MainThread(), __func__,
- [start, info, hw](){ ReportRecoveryTelemetry(start, info, hw); },
- [](){});
+ SetState<SeekingState>(Move(seekJob), EventVisibility::Suppressed);
}
void
diff --git a/dom/media/MediaFormatReader.cpp b/dom/media/MediaFormatReader.cpp
index 06e8b963b..773434710 100644
--- a/dom/media/MediaFormatReader.cpp
+++ b/dom/media/MediaFormatReader.cpp
@@ -32,6 +32,10 @@ using mozilla::layers::Image;
using mozilla::layers::LayerManager;
using mozilla::layers::LayersBackend;
+// avoid redefined macro warning in unified builds
+#undef LOG
+#undef LOGV
+
static mozilla::LazyLogModule sFormatDecoderLog("MediaFormatReader");
mozilla::LazyLogModule gMediaDemuxerLog("MediaDemuxer");
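
On the #undef LOG / #undef LOGV hunk above: in unified builds several .cpp files are concatenated into one translation unit, so a LOG macro defined by an earlier file in the unit collides with the one this file defines. Schematically (the LOG body below is a placeholder, not MediaFormatReader's actual definition):

    // A unified source file conceptually looks like:
    //   FileA.cpp:  #define LOG(...) MOZ_LOG(sLogA, LogLevel::Debug, (__VA_ARGS__))
    //   FileB.cpp:  #define LOG(...) MOZ_LOG(sLogB, LogLevel::Debug, (__VA_ARGS__))
    // The second #define triggers a "macro redefined" warning unless the file
    // drops any earlier definition first:
    #undef LOG
    #define LOG(...) MOZ_LOG(sFormatDecoderLog, mozilla::LogLevel::Debug, (__VA_ARGS__))
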
diff --git a/dom/media/MediaManager.cpp b/dom/media/MediaManager.cpp
index baaf45416..288f2e74d 100644
--- a/dom/media/MediaManager.cpp
+++ b/dom/media/MediaManager.cpp
@@ -2033,7 +2033,6 @@ MediaManager::GetUserMedia(nsPIDOMWindowInner* aWindow,
}
// Determine permissions early (while we still have a stack).
-
nsIURI* docURI = aWindow->GetDocumentURI();
if (!docURI) {
return NS_ERROR_UNEXPECTED;
@@ -2043,22 +2042,9 @@ MediaManager::GetUserMedia(nsPIDOMWindowInner* aWindow,
Preferences::GetBool("media.navigator.permission.disabled", false);
bool isHTTPS = false;
docURI->SchemeIs("https", &isHTTPS);
- nsCString host;
- nsresult rv = docURI->GetHost(host);
- // Test for some other schemes that ServiceWorker recognizes
- bool isFile;
- docURI->SchemeIs("file", &isFile);
- bool isApp;
- docURI->SchemeIs("app", &isApp);
- // Same localhost check as ServiceWorkers uses
- // (see IsOriginPotentiallyTrustworthy())
- bool isLocalhost = NS_SUCCEEDED(rv) &&
- (host.LowerCaseEqualsLiteral("localhost") ||
- host.LowerCaseEqualsLiteral("127.0.0.1") ||
- host.LowerCaseEqualsLiteral("::1"));
nsCString origin;
- rv = nsPrincipal::GetOriginForURI(docURI, origin);
+ nsresult rv = nsPrincipal::GetOriginForURI(docURI, origin);
if (NS_WARN_IF(NS_FAILED(rv))) {
return rv;
}
diff --git a/dom/media/MediaPrefs.h b/dom/media/MediaPrefs.h
index e67796edd..c67a89989 100644
--- a/dom/media/MediaPrefs.h
+++ b/dom/media/MediaPrefs.h
@@ -105,9 +105,6 @@ private:
DECL_MEDIA_PREF("media.eme.enabled", EMEEnabled, bool, false);
DECL_MEDIA_PREF("media.use-blank-decoder", PDMUseBlankDecoder, bool, false);
DECL_MEDIA_PREF("media.gpu-process-decoder", PDMUseGPUDecoder, bool, false);
-#ifdef MOZ_GONK_MEDIACODEC
- DECL_MEDIA_PREF("media.gonk.enabled", PDMGonkDecoderEnabled, bool, true);
-#endif
#ifdef MOZ_WIDGET_ANDROID
DECL_MEDIA_PREF("media.android-media-codec.enabled", PDMAndroidMediaCodecEnabled, bool, false);
DECL_MEDIA_PREF("media.android-media-codec.preferred", PDMAndroidMediaCodecPreferred, bool, false);
diff --git a/dom/media/ThreadPoolCOMListener.h b/dom/media/ThreadPoolCOMListener.h
index 881013a78..424ca65d2 100644
--- a/dom/media/ThreadPoolCOMListener.h
+++ b/dom/media/ThreadPoolCOMListener.h
@@ -13,8 +13,8 @@
namespace mozilla {
// Thread pool listener which ensures that MSCOM is initialized and
-// deinitialized on the thread pool thread. We may call into WMF or
-// DirectShow on this thread, so we need MSCOM working.
+// deinitialized on the thread pool thread. We may call into WMF on
+// this thread, so we need MSCOM working.
class MSCOMInitThreadPoolListener final : public nsIThreadPoolListener {
~MSCOMInitThreadPoolListener() {}
public:
diff --git a/dom/media/WebVTTListener.h b/dom/media/WebVTTListener.h
index 67271664a..461d7f00d 100644
--- a/dom/media/WebVTTListener.h
+++ b/dom/media/WebVTTListener.h
@@ -10,6 +10,7 @@
#include "nsIStreamListener.h"
#include "nsIChannelEventSink.h"
#include "nsIInterfaceRequestor.h"
+#include "nsCOMPtr.h"
#include "nsCycleCollectionParticipant.h"
class nsIWebVTTParserWrapper;
diff --git a/dom/media/android/AndroidMediaDecoder.cpp b/dom/media/android/AndroidMediaDecoder.cpp
deleted file mode 100644
index 41ef3fcb0..000000000
--- a/dom/media/android/AndroidMediaDecoder.cpp
+++ /dev/null
@@ -1,25 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this file,
- * You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include "MediaDecoderStateMachine.h"
-#include "AndroidMediaDecoder.h"
-#include "AndroidMediaReader.h"
-
-namespace mozilla {
-
-AndroidMediaDecoder::AndroidMediaDecoder(MediaDecoderOwner* aOwner,
- const nsACString& aType)
- : MediaDecoder(aOwner), mType(aType)
-{
-}
-
-MediaDecoderStateMachine* AndroidMediaDecoder::CreateStateMachine()
-{
- return new MediaDecoderStateMachine(this, new AndroidMediaReader(this, mType));
-}
-
-} // namespace mozilla
-
diff --git a/dom/media/android/AndroidMediaDecoder.h b/dom/media/android/AndroidMediaDecoder.h
deleted file mode 100644
index 88b5a243f..000000000
--- a/dom/media/android/AndroidMediaDecoder.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this file,
- * You can obtain one at http://mozilla.org/MPL/2.0/. */
-#if !defined(AndroidMediaDecoder_h_)
-#define AndroidMediaDecoder_h_
-
-#include "MediaDecoder.h"
-#include "AndroidMediaDecoder.h"
-
-namespace mozilla {
-
-class AndroidMediaDecoder : public MediaDecoder
-{
- nsCString mType;
-public:
- AndroidMediaDecoder(MediaDecoderOwner* aOwner, const nsACString& aType);
-
- MediaDecoder* Clone(MediaDecoderOwner* aOwner) override {
- return new AndroidMediaDecoder(aOwner, mType);
- }
- MediaDecoderStateMachine* CreateStateMachine() override;
-};
-
-} // namespace mozilla
-
-#endif
diff --git a/dom/media/android/AndroidMediaPluginHost.cpp b/dom/media/android/AndroidMediaPluginHost.cpp
deleted file mode 100644
index d4c4fc59e..000000000
--- a/dom/media/android/AndroidMediaPluginHost.cpp
+++ /dev/null
@@ -1,305 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this file,
- * You can obtain one at http://mozilla.org/MPL/2.0/. */
-#include "mozilla/Preferences.h"
-#include "MediaResource.h"
-#include "mozilla/dom/HTMLMediaElement.h"
-#include "mozilla/Services.h"
-#include "AndroidMediaPluginHost.h"
-#include "nsAutoPtr.h"
-#include "nsXPCOMStrings.h"
-#include "nsISeekableStream.h"
-#include "nsIGfxInfo.h"
-#include "prmem.h"
-#include "prlink.h"
-#include "AndroidMediaResourceServer.h"
-#include "nsServiceManagerUtils.h"
-
-#include "MPAPI.h"
-
-#include "nsIPropertyBag2.h"
-
-#if defined(ANDROID)
-#include "android/log.h"
-#define ALOG(args...) __android_log_print(ANDROID_LOG_INFO, "AndroidMediaPluginHost" , ## args)
-#else
-#define ALOG(args...) /* do nothing */
-#endif
-
-using namespace MPAPI;
-
-Decoder::Decoder() :
- mResource(nullptr), mPrivate(nullptr)
-{
-}
-
-namespace mozilla {
-
-static char* GetResource(Decoder *aDecoder)
-{
- return static_cast<char*>(aDecoder->mResource);
-}
-
-class GetIntPrefEvent : public Runnable {
-public:
- GetIntPrefEvent(const char* aPref, int32_t* aResult)
- : mPref(aPref), mResult(aResult) {}
- NS_IMETHOD Run() override {
- return Preferences::GetInt(mPref, mResult);
- }
-private:
- const char* mPref;
- int32_t* mResult;
-};
-
-static bool GetIntPref(const char* aPref, int32_t* aResult)
-{
- // GetIntPref() is called on the decoder thread, but the Preferences API
- // can only be called on the main thread. Post a runnable and wait.
- NS_ENSURE_TRUE(aPref, false);
- NS_ENSURE_TRUE(aResult, false);
- nsCOMPtr<nsIRunnable> event = new GetIntPrefEvent(aPref, aResult);
- return NS_SUCCEEDED(NS_DispatchToMainThread(event, NS_DISPATCH_SYNC));
-}
-
-static bool
-GetSystemInfoString(const char *aKey, char *aResult, size_t aResultLength)
-{
- NS_ENSURE_TRUE(aKey, false);
- NS_ENSURE_TRUE(aResult, false);
-
- nsCOMPtr<nsIPropertyBag2> infoService = do_GetService("@mozilla.org/system-info;1");
- NS_ASSERTION(infoService, "Could not find a system info service");
-
- nsAutoCString key(aKey);
- nsAutoCString info;
- nsresult rv = infoService->GetPropertyAsACString(NS_ConvertUTF8toUTF16(key),
- info);
-
- NS_ENSURE_SUCCESS(rv, false);
-
- strncpy(aResult, info.get(), aResultLength);
-
- return true;
-}
-
-static PluginHost sPluginHost = {
- nullptr,
- nullptr,
- nullptr,
- nullptr,
- GetIntPref,
- GetSystemInfoString
-};
-
-// Return true if Omx decoding is supported on the device. This checks the
-// built in whitelist/blacklist and preferences to see if that is overridden.
-static bool IsOmxSupported()
-{
- bool forceEnabled =
- Preferences::GetBool("stagefright.force-enabled", false);
- bool disabled =
- Preferences::GetBool("stagefright.disabled", false);
-
- if (disabled) {
- NS_WARNING("XXX stagefright disabled\n");
- return false;
- }
-
- if (!forceEnabled) {
- nsCOMPtr<nsIGfxInfo> gfxInfo = services::GetGfxInfo();
- if (gfxInfo) {
- int32_t status;
- nsCString discardFailure;
- if (NS_SUCCEEDED(gfxInfo->GetFeatureStatus(nsIGfxInfo::FEATURE_STAGEFRIGHT, discardFailure, &status))) {
- if (status != nsIGfxInfo::FEATURE_STATUS_OK) {
- NS_WARNING("XXX stagefright blacklisted\n");
- return false;
- }
- }
- }
- }
-
- return true;
-}
-
-// Return the name of the shared library that implements Omx based decoding. This varies
-// depending on libstagefright version installed on the device and whether it is B2G vs Android.
-// nullptr is returned if Omx decoding is not supported on the device.
-static const char* GetOmxLibraryName()
-{
-#if defined(ANDROID)
- nsCOMPtr<nsIPropertyBag2> infoService = do_GetService("@mozilla.org/system-info;1");
- NS_ASSERTION(infoService, "Could not find a system info service");
-
- int32_t version;
- nsresult rv = infoService->GetPropertyAsInt32(NS_LITERAL_STRING("version"), &version);
- if (NS_SUCCEEDED(rv)) {
- ALOG("Android Version is: %d", version);
- }
-
- nsAutoString release_version;
- rv = infoService->GetPropertyAsAString(NS_LITERAL_STRING("release_version"), release_version);
- if (NS_SUCCEEDED(rv)) {
- ALOG("Android Release Version is: %s", NS_LossyConvertUTF16toASCII(release_version).get());
- }
-
- nsAutoString device;
- rv = infoService->GetPropertyAsAString(NS_LITERAL_STRING("device"), device);
- if (NS_SUCCEEDED(rv)) {
- ALOG("Android Device is: %s", NS_LossyConvertUTF16toASCII(device).get());
- }
-
- nsAutoString manufacturer;
- rv = infoService->GetPropertyAsAString(NS_LITERAL_STRING("manufacturer"), manufacturer);
- if (NS_SUCCEEDED(rv)) {
- ALOG("Android Manufacturer is: %s", NS_LossyConvertUTF16toASCII(manufacturer).get());
- }
-
- nsAutoString hardware;
- rv = infoService->GetPropertyAsAString(NS_LITERAL_STRING("hardware"), hardware);
- if (NS_SUCCEEDED(rv)) {
- ALOG("Android Hardware is: %s", NS_LossyConvertUTF16toASCII(hardware).get());
- }
-#endif
-
- if (!IsOmxSupported())
- return nullptr;
-
-#if defined(ANDROID)
- if (version >= 17) {
- return "libomxpluginkk.so";
- }
-
- // Ice Cream Sandwich and Jellybean
- return "libomxplugin.so";
-
-#else
- return nullptr;
-#endif
-}
-
-AndroidMediaPluginHost::AndroidMediaPluginHost() {
- MOZ_COUNT_CTOR(AndroidMediaPluginHost);
- MOZ_ASSERT(NS_IsMainThread());
-
- mResourceServer = AndroidMediaResourceServer::Start();
-
- const char* name = GetOmxLibraryName();
- ALOG("Loading OMX Plugin: %s", name ? name : "nullptr");
- if (name) {
- char *path = PR_GetLibraryFilePathname("libxul.so", (PRFuncPtr) GetOmxLibraryName);
- PRLibrary *lib = nullptr;
- if (path) {
- nsAutoCString libpath(path);
- PR_Free(path);
- int32_t slash = libpath.RFindChar('/');
- if (slash != kNotFound) {
- libpath.Truncate(slash + 1);
- libpath.Append(name);
- lib = PR_LoadLibrary(libpath.get());
- }
- }
- if (!lib)
- lib = PR_LoadLibrary(name);
-
- if (lib) {
- Manifest *manifest = static_cast<Manifest *>(PR_FindSymbol(lib, "MPAPI_MANIFEST"));
- if (manifest) {
- mPlugins.AppendElement(manifest);
- ALOG("OMX plugin successfully loaded");
- }
- }
- }
-}
-
-AndroidMediaPluginHost::~AndroidMediaPluginHost() {
- mResourceServer->Stop();
- MOZ_COUNT_DTOR(AndroidMediaPluginHost);
-}
-
-bool AndroidMediaPluginHost::FindDecoder(const nsACString& aMimeType, const char* const** aCodecs)
-{
- const char *chars;
- size_t len = NS_CStringGetData(aMimeType, &chars, nullptr);
- for (size_t n = 0; n < mPlugins.Length(); ++n) {
- Manifest *plugin = mPlugins[n];
- const char* const *codecs;
- if (plugin->CanDecode(chars, len, &codecs)) {
- if (aCodecs)
- *aCodecs = codecs;
- return true;
- }
- }
- return false;
-}
-
-MPAPI::Decoder *AndroidMediaPluginHost::CreateDecoder(MediaResource *aResource, const nsACString& aMimeType)
-{
- NS_ENSURE_TRUE(aResource, nullptr);
-
- nsAutoPtr<Decoder> decoder(new Decoder());
- if (!decoder) {
- return nullptr;
- }
-
- const char *chars;
- size_t len = NS_CStringGetData(aMimeType, &chars, nullptr);
- for (size_t n = 0; n < mPlugins.Length(); ++n) {
- Manifest *plugin = mPlugins[n];
- const char* const *codecs;
- if (!plugin->CanDecode(chars, len, &codecs)) {
- continue;
- }
-
- nsCString url;
- nsresult rv = mResourceServer->AddResource(aResource, url);
- if (NS_FAILED (rv)) continue;
-
- decoder->mResource = strdup(url.get());
- if (plugin->CreateDecoder(&sPluginHost, decoder, chars, len)) {
- return decoder.forget();
- }
- }
-
- return nullptr;
-}
-
-void AndroidMediaPluginHost::DestroyDecoder(Decoder *aDecoder)
-{
- aDecoder->DestroyDecoder(aDecoder);
- char* resource = GetResource(aDecoder);
- if (resource) {
- // resource *shouldn't* be null, but check anyway just in case the plugin
- // decoder does something stupid.
- mResourceServer->RemoveResource(nsCString(resource));
- free(resource);
- }
- delete aDecoder;
-}
-
-AndroidMediaPluginHost *sAndroidMediaPluginHost = nullptr;
-AndroidMediaPluginHost *EnsureAndroidMediaPluginHost()
-{
- MOZ_DIAGNOSTIC_ASSERT(NS_IsMainThread());
- if (!sAndroidMediaPluginHost) {
- sAndroidMediaPluginHost = new AndroidMediaPluginHost();
- }
- return sAndroidMediaPluginHost;
-}
-
-AndroidMediaPluginHost *GetAndroidMediaPluginHost()
-{
- MOZ_ASSERT(sAndroidMediaPluginHost);
- return sAndroidMediaPluginHost;
-}
-
-void AndroidMediaPluginHost::Shutdown()
-{
- delete sAndroidMediaPluginHost;
- sAndroidMediaPluginHost = nullptr;
-}
-
-} // namespace mozilla
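
For reference, the plugin-loading sequence in the removed AndroidMediaPluginHost constructor is the standard NSPR pattern: resolve a path next to libxul, PR_LoadLibrary() it (falling back to the bare library name), then PR_FindSymbol() a well-known manifest export. A minimal sketch of that pattern under the same assumptions; ExampleManifest and "EXAMPLE_MANIFEST" are illustrative stand-ins for the MPAPI types and symbol name:

    #include "prlink.h"

    struct ExampleManifest;  // stand-in for the plugin's exported manifest type

    // Load a plugin library by name and return its exported manifest, or null.
    static ExampleManifest* LoadPluginManifest(const char* aLibName)
    {
      PRLibrary* lib = PR_LoadLibrary(aLibName);
      if (!lib) {
        return nullptr;
      }
      // PR_FindSymbol resolves a named export from the loaded library.
      void* sym = PR_FindSymbol(lib, "EXAMPLE_MANIFEST");
      if (!sym) {
        PR_UnloadLibrary(lib);
        return nullptr;
      }
      return static_cast<ExampleManifest*>(sym);
    }
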
diff --git a/dom/media/android/AndroidMediaPluginHost.h b/dom/media/android/AndroidMediaPluginHost.h
deleted file mode 100644
index 854b7f21e..000000000
--- a/dom/media/android/AndroidMediaPluginHost.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this file,
- * You can obtain one at http://mozilla.org/MPL/2.0/. */
-#if !defined(AndroidMediaPluginHost_h_)
-#define AndroidMediaPluginHost_h_
-
-#include "nsTArray.h"
-#include "MediaResource.h"
-#include "MPAPI.h"
-#include "AndroidMediaResourceServer.h"
-
-namespace mozilla {
-
-class AndroidMediaPluginHost {
- RefPtr<AndroidMediaResourceServer> mResourceServer;
- nsTArray<MPAPI::Manifest *> mPlugins;
-
- MPAPI::Manifest *FindPlugin(const nsACString& aMimeType);
-public:
- AndroidMediaPluginHost();
- ~AndroidMediaPluginHost();
-
- static void Shutdown();
-
- bool FindDecoder(const nsACString& aMimeType, const char* const** aCodecs);
- MPAPI::Decoder *CreateDecoder(mozilla::MediaResource *aResource, const nsACString& aMimeType);
- void DestroyDecoder(MPAPI::Decoder *aDecoder);
-};
-
-// Must be called on the main thread. Creates the plugin host if it doesn't
-// already exist.
-AndroidMediaPluginHost *EnsureAndroidMediaPluginHost();
-
-// May be called on any thread after EnsureAndroidMediaPluginHost has been called.
-AndroidMediaPluginHost *GetAndroidMediaPluginHost();
-
-} // namespace mozilla
-
-#endif
diff --git a/dom/media/android/AndroidMediaReader.cpp b/dom/media/android/AndroidMediaReader.cpp
deleted file mode 100644
index 12afacbc9..000000000
--- a/dom/media/android/AndroidMediaReader.cpp
+++ /dev/null
@@ -1,449 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this file,
- * You can obtain one at http://mozilla.org/MPL/2.0/. */
-#include "AndroidMediaReader.h"
-#include "mozilla/TimeStamp.h"
-#include "mozilla/gfx/Point.h"
-#include "MediaResource.h"
-#include "VideoUtils.h"
-#include "AndroidMediaDecoder.h"
-#include "AndroidMediaPluginHost.h"
-#include "MediaDecoderStateMachine.h"
-#include "ImageContainer.h"
-#include "AbstractMediaDecoder.h"
-#include "gfx2DGlue.h"
-#include "VideoFrameContainer.h"
-#include "mozilla/CheckedInt.h"
-
-namespace mozilla {
-
-using namespace mozilla::gfx;
-using namespace mozilla::media;
-
-typedef mozilla::layers::Image Image;
-typedef mozilla::layers::PlanarYCbCrImage PlanarYCbCrImage;
-
-AndroidMediaReader::AndroidMediaReader(AbstractMediaDecoder *aDecoder,
- const nsACString& aContentType) :
- MediaDecoderReader(aDecoder),
- mType(aContentType),
- mPlugin(nullptr),
- mHasAudio(false),
- mHasVideo(false),
- mVideoSeekTimeUs(-1),
- mAudioSeekTimeUs(-1)
-{
-}
-
-nsresult AndroidMediaReader::ReadMetadata(MediaInfo* aInfo,
- MetadataTags** aTags)
-{
- MOZ_ASSERT(OnTaskQueue());
-
- if (!mPlugin) {
- mPlugin = GetAndroidMediaPluginHost()->CreateDecoder(mDecoder->GetResource(), mType);
- if (!mPlugin) {
- return NS_ERROR_FAILURE;
- }
- }
-
- // Set the total duration (the max of the audio and video track).
- int64_t durationUs;
- mPlugin->GetDuration(mPlugin, &durationUs);
- if (durationUs) {
- mInfo.mMetadataDuration.emplace(TimeUnit::FromMicroseconds(durationUs));
- }
-
- if (mPlugin->HasVideo(mPlugin)) {
- int32_t width, height;
- mPlugin->GetVideoParameters(mPlugin, &width, &height);
- nsIntRect pictureRect(0, 0, width, height);
-
- // Validate the container-reported frame and pictureRect sizes. This ensures
- // that our video frame creation code doesn't overflow.
- nsIntSize displaySize(width, height);
- nsIntSize frameSize(width, height);
- if (!IsValidVideoRegion(frameSize, pictureRect, displaySize)) {
- return NS_ERROR_FAILURE;
- }
-
- // Video track's frame sizes will not overflow. Activate the video track.
- mHasVideo = true;
- mInfo.mVideo.mDisplay = displaySize;
- mPicture = pictureRect;
- mInitialFrame = frameSize;
- VideoFrameContainer* container = mDecoder->GetVideoFrameContainer();
- if (container) {
- container->ClearCurrentFrame(IntSize(displaySize.width, displaySize.height));
- }
- }
-
- if (mPlugin->HasAudio(mPlugin)) {
- int32_t numChannels, sampleRate;
- mPlugin->GetAudioParameters(mPlugin, &numChannels, &sampleRate);
- mHasAudio = true;
- mInfo.mAudio.mChannels = numChannels;
- mInfo.mAudio.mRate = sampleRate;
- }
-
- *aInfo = mInfo;
- *aTags = nullptr;
- return NS_OK;
-}
-
-RefPtr<ShutdownPromise>
-AndroidMediaReader::Shutdown()
-{
- ResetDecode();
- if (mPlugin) {
- GetAndroidMediaPluginHost()->DestroyDecoder(mPlugin);
- mPlugin = nullptr;
- }
-
- return MediaDecoderReader::Shutdown();
-}
-
-// Resets all state related to decoding, emptying all buffers etc.
-nsresult AndroidMediaReader::ResetDecode(TrackSet aTracks)
-{
- if (mLastVideoFrame) {
- mLastVideoFrame = nullptr;
- }
- mSeekRequest.DisconnectIfExists();
- mSeekPromise.RejectIfExists(NS_OK, __func__);
- return MediaDecoderReader::ResetDecode(aTracks);
-}
-
-bool AndroidMediaReader::DecodeVideoFrame(bool &aKeyframeSkip,
- int64_t aTimeThreshold)
-{
- // Record number of frames decoded and parsed. Automatically update the
- // stats counters using the AutoNotifyDecoded stack-based class.
- AbstractMediaDecoder::AutoNotifyDecoded a(mDecoder);
-
- // Throw away the currently buffered frame if we are seeking.
- if (mLastVideoFrame && mVideoSeekTimeUs != -1) {
- mLastVideoFrame = nullptr;
- }
-
- ImageBufferCallback bufferCallback(mDecoder->GetImageContainer());
- RefPtr<Image> currentImage;
-
- // Read next frame
- while (true) {
- MPAPI::VideoFrame frame;
- if (!mPlugin->ReadVideo(mPlugin, &frame, mVideoSeekTimeUs, &bufferCallback)) {
-      // We reached the end of the video stream. If we have a buffered
-      // video frame, push it into the video queue using the total duration
-      // of the video as the end time.
- if (mLastVideoFrame) {
- int64_t durationUs;
- mPlugin->GetDuration(mPlugin, &durationUs);
- durationUs = std::max<int64_t>(durationUs - mLastVideoFrame->mTime, 0);
- RefPtr<VideoData> data = VideoData::ShallowCopyUpdateDuration(mLastVideoFrame,
- durationUs);
- mVideoQueue.Push(data);
- mLastVideoFrame = nullptr;
- }
- return false;
- }
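-    // The plugin has consumed the pending video seek time; clear it so
-    // subsequent reads continue from the current position.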
- mVideoSeekTimeUs = -1;
-
- if (aKeyframeSkip) {
- // Disable keyframe skipping for now as
- // stagefright doesn't seem to be telling us
- // when a frame is a keyframe.
-#if 0
- if (!frame.mKeyFrame) {
- ++a.mStats.mParsedFrames;
- ++a.mStats.mDroppedFrames;
- continue;
- }
-#endif
- aKeyframeSkip = false;
- }
-
- if (frame.mSize == 0)
- return true;
-
- currentImage = bufferCallback.GetImage();
- int64_t pos = mDecoder->GetResource()->Tell();
- IntRect picture = mPicture;
-
- RefPtr<VideoData> v;
- if (currentImage) {
- gfx::IntSize frameSize = currentImage->GetSize();
- if (frameSize.width != mInitialFrame.width ||
- frameSize.height != mInitialFrame.height) {
-        // Frame size is different from what the container reports. This is
-        // legal; scale the crop rectangle so it keeps the proportions it had
-        // relative to the picture size reported by the container.
- picture.x = (mPicture.x * frameSize.width) / mInitialFrame.width;
- picture.y = (mPicture.y * frameSize.height) / mInitialFrame.height;
- picture.width = (frameSize.width * mPicture.width) / mInitialFrame.width;
- picture.height = (frameSize.height * mPicture.height) / mInitialFrame.height;
- }
-
- v = VideoData::CreateFromImage(mInfo.mVideo,
- pos,
- frame.mTimeUs,
- 1, // We don't know the duration yet.
- currentImage,
- frame.mKeyFrame,
- -1,
- picture);
- } else {
- // Assume YUV
- VideoData::YCbCrBuffer b;
- b.mPlanes[0].mData = static_cast<uint8_t *>(frame.Y.mData);
- b.mPlanes[0].mStride = frame.Y.mStride;
- b.mPlanes[0].mHeight = frame.Y.mHeight;
- b.mPlanes[0].mWidth = frame.Y.mWidth;
- b.mPlanes[0].mOffset = frame.Y.mOffset;
- b.mPlanes[0].mSkip = frame.Y.mSkip;
-
- b.mPlanes[1].mData = static_cast<uint8_t *>(frame.Cb.mData);
- b.mPlanes[1].mStride = frame.Cb.mStride;
- b.mPlanes[1].mHeight = frame.Cb.mHeight;
- b.mPlanes[1].mWidth = frame.Cb.mWidth;
- b.mPlanes[1].mOffset = frame.Cb.mOffset;
- b.mPlanes[1].mSkip = frame.Cb.mSkip;
-
- b.mPlanes[2].mData = static_cast<uint8_t *>(frame.Cr.mData);
- b.mPlanes[2].mStride = frame.Cr.mStride;
- b.mPlanes[2].mHeight = frame.Cr.mHeight;
- b.mPlanes[2].mWidth = frame.Cr.mWidth;
- b.mPlanes[2].mOffset = frame.Cr.mOffset;
- b.mPlanes[2].mSkip = frame.Cr.mSkip;
-
- if (frame.Y.mWidth != mInitialFrame.width ||
- frame.Y.mHeight != mInitialFrame.height) {
-
-      // Frame size is different from what the container reports. This is
-      // legal; scale the crop rectangle so it keeps the proportions it had
-      // relative to the picture size reported by the container.
- picture.x = (mPicture.x * frame.Y.mWidth) / mInitialFrame.width;
- picture.y = (mPicture.y * frame.Y.mHeight) / mInitialFrame.height;
- picture.width = (frame.Y.mWidth * mPicture.width) / mInitialFrame.width;
- picture.height = (frame.Y.mHeight * mPicture.height) / mInitialFrame.height;
- }
-
- // This is the approximate byte position in the stream.
- v = VideoData::CreateAndCopyData(mInfo.mVideo,
- mDecoder->GetImageContainer(),
- pos,
- frame.mTimeUs,
- 1, // We don't know the duration yet.
- b,
- frame.mKeyFrame,
- -1,
- picture);
- }
-
- if (!v) {
- return false;
- }
- a.mStats.mParsedFrames++;
- a.mStats.mDecodedFrames++;
- NS_ASSERTION(a.mStats.mDecodedFrames <= a.mStats.mParsedFrames, "Expect to decode fewer frames than parsed in AndroidMedia...");
-
-    // Since MPAPI doesn't give us the end time of frames, we keep one frame
-    // buffered in AndroidMediaReader and push it into the queue as soon as
-    // we read the following frame, so we can use that frame's start time as
-    // the end time of the buffered frame.
- if (!mLastVideoFrame) {
- mLastVideoFrame = v;
- continue;
- }
-
- // Calculate the duration as the timestamp of the current frame minus the
- // timestamp of the previous frame. We can then return the previously
- // decoded frame, and it will have a valid timestamp.
- int64_t duration = v->mTime - mLastVideoFrame->mTime;
- mLastVideoFrame = VideoData::ShallowCopyUpdateDuration(mLastVideoFrame, duration);
-
- // We have the start time of the next frame, so we can push the previous
- // frame into the queue, except if the end time is below the threshold,
- // in which case it wouldn't be displayed anyway.
- if (mLastVideoFrame->GetEndTime() < aTimeThreshold) {
- mLastVideoFrame = nullptr;
- continue;
- }
-
-    // Push the previous frame into the queue and buffer the frame we just
-    // decoded.
- mVideoQueue.Push(mLastVideoFrame);
- mLastVideoFrame = v;
-
- break;
- }
-
- return true;
-}
-
-bool AndroidMediaReader::DecodeAudioData()
-{
- MOZ_ASSERT(OnTaskQueue());
-
- // This is the approximate byte position in the stream.
- int64_t pos = mDecoder->GetResource()->Tell();
-
- // Read next frame
- MPAPI::AudioFrame source;
- if (!mPlugin->ReadAudio(mPlugin, &source, mAudioSeekTimeUs)) {
- return false;
- }
- mAudioSeekTimeUs = -1;
-
- // Ignore empty buffers which stagefright media read will sporadically return
- if (source.mSize == 0)
- return true;
-
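-  // source.mSize is in bytes and each frame holds one sample per channel,
-  // so divide by (channels * bytes per sample) to get the frame count.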
- uint32_t frames = source.mSize / (source.mAudioChannels *
- sizeof(AudioDataValue));
-
- typedef AudioCompactor::NativeCopy MPCopy;
- return mAudioCompactor.Push(pos,
- source.mTimeUs,
- source.mAudioSampleRate,
- frames,
- source.mAudioChannels,
- MPCopy(static_cast<uint8_t *>(source.mData),
- source.mSize,
- source.mAudioChannels));
-}
-
-RefPtr<MediaDecoderReader::SeekPromise>
-AndroidMediaReader::Seek(SeekTarget aTarget, int64_t aEndTime)
-{
- MOZ_ASSERT(OnTaskQueue());
-
- RefPtr<SeekPromise> p = mSeekPromise.Ensure(__func__);
- if (mHasAudio && mHasVideo) {
-    // The decoder seeks/demuxes audio and video streams separately. So if
-    // we seek both audio and video to aTarget, the audio stream can typically
-    // seek closer to the seek target, since nearly every audio block is
-    // a sync point, whereas for video there are only keyframes once every few
-    // seconds. So if we have both audio and video, we must seek the video
-    // stream to the preceding keyframe first, get the stream time, and then
-    // seek the audio stream to match the video stream's time. Otherwise, the
-    // audio and video streams won't be in sync after the seek.
- mVideoSeekTimeUs = aTarget.GetTime().ToMicroseconds();
-
- RefPtr<AndroidMediaReader> self = this;
- mSeekRequest.Begin(DecodeToFirstVideoData()->Then(OwnerThread(), __func__, [self] (MediaData* v) {
- self->mSeekRequest.Complete();
- self->mAudioSeekTimeUs = v->mTime;
- self->mSeekPromise.Resolve(media::TimeUnit::FromMicroseconds(self->mAudioSeekTimeUs), __func__);
- }, [self, aTarget] () {
- self->mSeekRequest.Complete();
- self->mAudioSeekTimeUs = aTarget.GetTime().ToMicroseconds();
- self->mSeekPromise.Resolve(aTarget.GetTime(), __func__);
- }));
- } else {
- mAudioSeekTimeUs = mVideoSeekTimeUs = aTarget.GetTime().ToMicroseconds();
- mSeekPromise.Resolve(aTarget.GetTime(), __func__);
- }
-
- return p;
-}
-
-AndroidMediaReader::ImageBufferCallback::ImageBufferCallback(mozilla::layers::ImageContainer *aImageContainer) :
- mImageContainer(aImageContainer)
-{
-}
-
-void *
-AndroidMediaReader::ImageBufferCallback::operator()(size_t aWidth, size_t aHeight,
- MPAPI::ColorFormat aColorFormat)
-{
- if (!mImageContainer) {
- NS_WARNING("No image container to construct an image");
- return nullptr;
- }
-
- RefPtr<Image> image;
- switch(aColorFormat) {
- case MPAPI::RGB565:
- image = mozilla::layers::CreateSharedRGBImage(mImageContainer,
- nsIntSize(aWidth, aHeight),
- SurfaceFormat::R5G6B5_UINT16);
- if (!image) {
- NS_WARNING("Could not create rgb image");
- return nullptr;
- }
-
- mImage = image;
- return image->GetBuffer();
- case MPAPI::I420:
- return CreateI420Image(aWidth, aHeight);
- default:
- NS_NOTREACHED("Color format not supported");
- return nullptr;
- }
-}
-
-uint8_t *
-AndroidMediaReader::ImageBufferCallback::CreateI420Image(size_t aWidth,
- size_t aHeight)
-{
- RefPtr<PlanarYCbCrImage> yuvImage = mImageContainer->CreatePlanarYCbCrImage();
- mImage = yuvImage;
-
- if (!yuvImage) {
- NS_WARNING("Could not create I420 image");
- return nullptr;
- }
-
- // Use uint32_t throughout to match AllocateAndGetNewBuffer's param
- const auto checkedFrameSize =
- CheckedInt<uint32_t>(aWidth) * aHeight;
-
- // Allocate enough for one full resolution Y plane
- // and two quarter resolution Cb/Cr planes.
- const auto checkedBufferSize =
- checkedFrameSize + checkedFrameSize / 2;
-
- if (!checkedBufferSize.isValid()) { // checks checkedFrameSize too
- NS_WARNING("Could not create I420 image");
- return nullptr;
- }
-
- const auto frameSize = checkedFrameSize.value();
-
- uint8_t *buffer =
- yuvImage->AllocateAndGetNewBuffer(checkedBufferSize.value());
-
- mozilla::layers::PlanarYCbCrData frameDesc;
-
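-  // Buffer layout: a full-resolution Y plane (frameSize bytes) followed by
-  // quarter-resolution Cb and Cr planes (frameSize / 4 bytes each).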
- frameDesc.mYChannel = buffer;
- frameDesc.mCbChannel = buffer + frameSize;
- frameDesc.mCrChannel = frameDesc.mCbChannel + frameSize / 4;
-
- frameDesc.mYSize = IntSize(aWidth, aHeight);
- frameDesc.mCbCrSize = IntSize(aWidth / 2, aHeight / 2);
-
- frameDesc.mYStride = aWidth;
- frameDesc.mCbCrStride = aWidth / 2;
-
- frameDesc.mYSkip = 0;
- frameDesc.mCbSkip = 0;
- frameDesc.mCrSkip = 0;
-
- frameDesc.mPicX = 0;
- frameDesc.mPicY = 0;
- frameDesc.mPicSize = IntSize(aWidth, aHeight);
-
- yuvImage->AdoptData(frameDesc);
-
- return buffer;
-}
-
-already_AddRefed<Image>
-AndroidMediaReader::ImageBufferCallback::GetImage()
-{
- return mImage.forget();
-}
-
-} // namespace mozilla
diff --git a/dom/media/android/AndroidMediaReader.h b/dom/media/android/AndroidMediaReader.h
deleted file mode 100644
index def85a343..000000000
--- a/dom/media/android/AndroidMediaReader.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this file,
- * You can obtain one at http://mozilla.org/MPL/2.0/. */
-#if !defined(AndroidMediaReader_h_)
-#define AndroidMediaReader_h_
-
-#include "mozilla/Attributes.h"
-#include "MediaResource.h"
-#include "MediaDecoderReader.h"
-#include "ImageContainer.h"
-#include "mozilla/layers/SharedRGBImage.h"
-
-#include "MPAPI.h"
-
-class nsACString;
-
-namespace mozilla {
-
-class AbstractMediaDecoder;
-
-namespace layers {
-class ImageContainer;
-}
-
-class AndroidMediaReader : public MediaDecoderReader
-{
- nsCString mType;
- MPAPI::Decoder *mPlugin;
- bool mHasAudio;
- bool mHasVideo;
- nsIntRect mPicture;
- nsIntSize mInitialFrame;
- int64_t mVideoSeekTimeUs;
- int64_t mAudioSeekTimeUs;
- RefPtr<VideoData> mLastVideoFrame;
- MozPromiseHolder<MediaDecoderReader::SeekPromise> mSeekPromise;
- MozPromiseRequestHolder<MediaDecoderReader::MediaDataPromise> mSeekRequest;
-public:
- AndroidMediaReader(AbstractMediaDecoder* aDecoder,
- const nsACString& aContentType);
-
- nsresult ResetDecode(TrackSet aTracks = TrackSet(TrackInfo::kAudioTrack,
- TrackInfo::kVideoTrack)) override;
-
- bool DecodeAudioData() override;
- bool DecodeVideoFrame(bool &aKeyframeSkip, int64_t aTimeThreshold) override;
-
- nsresult ReadMetadata(MediaInfo* aInfo, MetadataTags** aTags) override;
- RefPtr<SeekPromise> Seek(SeekTarget aTarget, int64_t aEndTime) override;
-
- RefPtr<ShutdownPromise> Shutdown() override;
-
- class ImageBufferCallback : public MPAPI::BufferCallback {
- typedef mozilla::layers::Image Image;
-
- public:
- ImageBufferCallback(mozilla::layers::ImageContainer *aImageContainer);
- void *operator()(size_t aWidth, size_t aHeight,
- MPAPI::ColorFormat aColorFormat) override;
- already_AddRefed<Image> GetImage();
-
- private:
- uint8_t *CreateI420Image(size_t aWidth, size_t aHeight);
-
- mozilla::layers::ImageContainer *mImageContainer;
- RefPtr<Image> mImage;
- };
-
-};
-
-} // namespace mozilla
-
-#endif
diff --git a/dom/media/android/AndroidMediaResourceServer.cpp b/dom/media/android/AndroidMediaResourceServer.cpp
deleted file mode 100644
index bd76a8c68..000000000
--- a/dom/media/android/AndroidMediaResourceServer.cpp
+++ /dev/null
@@ -1,503 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this file,
- * You can obtain one at http://mozilla.org/MPL/2.0/. */
-#include "mozilla/Assertions.h"
-#include "mozilla/Base64.h"
-#include "mozilla/IntegerPrintfMacros.h"
-#include "mozilla/UniquePtr.h"
-#include "nsThreadUtils.h"
-#include "nsIServiceManager.h"
-#include "nsISocketTransport.h"
-#include "nsIOutputStream.h"
-#include "nsIInputStream.h"
-#include "nsIRandomGenerator.h"
-#include "nsReadLine.h"
-#include "nsNetCID.h"
-#include "VideoUtils.h"
-#include "MediaResource.h"
-#include "AndroidMediaResourceServer.h"
-
-#if defined(_MSC_VER)
-#define strtoll _strtoi64
-#endif
-
-using namespace mozilla;
-
-/*
- ReadCRLF is a variant of NS_ReadLine from nsReadLine.h that deals
- with the carriage return/line feed requirements of HTTP requests.
-*/
-template<typename CharT, class StreamType, class StringType>
-nsresult
-ReadCRLF (StreamType* aStream, nsLineBuffer<CharT> * aBuffer,
- StringType & aLine, bool *aMore)
-{
- // eollast is true if the last character in the buffer is a '\r',
- // signaling a potential '\r\n' sequence split between reads.
- bool eollast = false;
-
- aLine.Truncate();
-
- while (1) { // will be returning out of this loop on eol or eof
- if (aBuffer->start == aBuffer->end) { // buffer is empty. Read into it.
- uint32_t bytesRead;
- nsresult rv = aStream->Read(aBuffer->buf, kLineBufferSize, &bytesRead);
- if (NS_FAILED(rv) || bytesRead == 0) {
- *aMore = false;
- return rv;
- }
- aBuffer->start = aBuffer->buf;
- aBuffer->end = aBuffer->buf + bytesRead;
- *(aBuffer->end) = '\0';
- }
-
- /*
- * Walk the buffer looking for an end-of-line.
- * There are 4 cases to consider:
-     *  1. the CR char is the last char in the buffer
-     *  2. the CRLF sequence is the last pair of characters in the buffer
-     *  3. the CRLF sequence + one or more chars at the end of the buffer;
-     *     we need at least one char after the first CRLF sequence to
-     *     set |aMore| correctly.
-     *  4. the LF character is the first char in the buffer when eollast is
-     *     true.
- */
- CharT* current = aBuffer->start;
- if (eollast) { // Case 4
- if (*current == '\n') {
- aBuffer->start = ++current;
- *aMore = true;
- return NS_OK;
- }
- else {
- eollast = false;
- aLine.Append('\r');
- }
- }
- // Cases 2 and 3
- for ( ; current < aBuffer->end-1; ++current) {
- if (*current == '\r' && *(current+1) == '\n') {
- *current++ = '\0';
- *current++ = '\0';
- aLine.Append(aBuffer->start);
- aBuffer->start = current;
- *aMore = true;
- return NS_OK;
- }
- }
- // Case 1
- if (*current == '\r') {
- eollast = true;
- *current++ = '\0';
- }
-
- aLine.Append(aBuffer->start);
- aBuffer->start = aBuffer->end; // mark the buffer empty
- }
-}
-
-// Each client HTTP request results in a thread being spawned to process it.
-// That thread has a single event dispatched to it which handles the HTTP
-// protocol. It parses the headers and forwards data from the MediaResource
-// associated with the URL back to the client. When the request is complete it
-// shuts down the thread.
-class ServeResourceEvent : public Runnable {
-private:
- // Reading from this reads the data sent from the client.
- nsCOMPtr<nsIInputStream> mInput;
-
- // Writing to this sends data to the client.
- nsCOMPtr<nsIOutputStream> mOutput;
-
- // The AndroidMediaResourceServer that owns the MediaResource instances
- // served. This is used to lookup the MediaResource from the URL.
- RefPtr<AndroidMediaResourceServer> mServer;
-
- // Write 'aBufferLength' bytes from 'aBuffer' to 'mOutput'. This
- // method ensures all the data is written by checking the number
-  // of bytes returned from the output stream's 'Write' method and
- // looping until done.
- nsresult WriteAll(char const* aBuffer, int32_t aBufferLength);
-
-public:
- ServeResourceEvent(nsIInputStream* aInput, nsIOutputStream* aOutput,
- AndroidMediaResourceServer* aServer)
- : mInput(aInput), mOutput(aOutput), mServer(aServer) {}
-
- // This method runs on the thread and exits when it has completed the
- // HTTP request.
- NS_IMETHOD Run();
-
- // Given the first line of an HTTP request, parse the URL requested and
- // return the MediaResource for that URL.
- already_AddRefed<MediaResource> GetMediaResource(nsCString const& aHTTPRequest);
-
- // Gracefully shutdown the thread and cleanup resources
- void Shutdown();
-};
-
-nsresult
-ServeResourceEvent::WriteAll(char const* aBuffer, int32_t aBufferLength)
-{
- while (aBufferLength > 0) {
- uint32_t written = 0;
- nsresult rv = mOutput->Write(aBuffer, aBufferLength, &written);
- if (NS_FAILED (rv)) return rv;
-
- aBufferLength -= written;
- aBuffer += written;
- }
-
- return NS_OK;
-}
-
-already_AddRefed<MediaResource>
-ServeResourceEvent::GetMediaResource(nsCString const& aHTTPRequest)
-{
- // Check that the HTTP method is GET
- const char* HTTP_METHOD = "GET ";
- if (strncmp(aHTTPRequest.get(), HTTP_METHOD, strlen(HTTP_METHOD)) != 0) {
- return nullptr;
- }
-
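-  // The request line looks like "GET <url> HTTP/1.1", so the URL sits
-  // between the first and last space characters.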
- const char* url_start = strchr(aHTTPRequest.get(), ' ');
- if (!url_start) {
- return nullptr;
- }
-
- const char* url_end = strrchr(++url_start, ' ');
- if (!url_end) {
- return nullptr;
- }
-
-  // The path extracted from the HTTP request is used as a key in the hash
-  // table. It is not related to retrieving data from the filesystem, so
-  // we don't need to do any sanity checking on ".." paths and similar
-  // exploits.
- nsCString relative(url_start, url_end - url_start);
- RefPtr<MediaResource> resource =
- mServer->GetResource(mServer->GetURLPrefix() + relative);
- return resource.forget();
-}
-
-NS_IMETHODIMP
-ServeResourceEvent::Run() {
- bool more = false; // Are there HTTP headers to read after the first line
- nsCString line; // Contains the current line read from input stream
- nsLineBuffer<char>* buffer = new nsLineBuffer<char>();
- nsresult rv = ReadCRLF(mInput.get(), buffer, line, &more);
- if (NS_FAILED(rv)) { Shutdown(); return rv; }
-
- // First line contains the HTTP GET request. Extract the URL and obtain
- // the MediaResource for it.
- RefPtr<MediaResource> resource = GetMediaResource(line);
- if (!resource) {
- const char* response_404 = "HTTP/1.1 404 Not Found\r\n"
- "Content-Length: 0\r\n\r\n";
- rv = WriteAll(response_404, strlen(response_404));
- Shutdown();
- return rv;
- }
-
- // Offset in bytes to start reading from resource.
- // This is zero by default but can be set to another starting value if
- // this HTTP request includes a byte range request header.
- int64_t start = 0;
-
- // Keep reading lines until we get a zero length line, which is the HTTP
- // protocol's way of signifying the end of headers and start of body, or
- // until we have no more data to read.
- while (more && line.Length() > 0) {
- rv = ReadCRLF(mInput.get(), buffer, line, &more);
- if (NS_FAILED(rv)) { Shutdown(); return rv; }
-
-    // Look for a byte range request header. If there is one, start reading
-    // the media resource from the requested offset. Here we
- // only check for the range request format used by Android rather
- // than implementing all possibilities in the HTTP specification.
- // That is, the range request is of the form:
- // Range: bytes=nnnn-
-    // where 'nnnn' is an integer.
-    // The end of the range is not checked; instead we return up to
-    // the end of the resource and the client is informed of this via
-    // the Content-Range header.
- NS_NAMED_LITERAL_CSTRING(byteRange, "Range: bytes=");
- const char* s = strstr(line.get(), byteRange.get());
- if (s) {
- start = strtoll(s+byteRange.Length(), nullptr, 10);
-
- // Clamp 'start' to be between 0 and the resource length.
- start = std::max(int64_t(0), std::min(resource->GetLength(), start));
- }
- }
-
- // HTTP response to use if this is a non byte range request
- const char* response_normal = "HTTP/1.1 200 OK\r\n";
-
- // HTTP response to use if this is a byte range request
- const char* response_range = "HTTP/1.1 206 Partial Content\r\n";
-
-  // End of HTTP response headers is indicated by an empty line.
- const char* response_end = "\r\n";
-
- // If the request was a byte range request, we need to read from the
- // requested offset. If the resource is non-seekable, or the seek
- // fails, then the start offset is set back to zero. This results in all
- // HTTP response data being as if the byte range request was not made.
- if (start > 0 && !resource->IsTransportSeekable()) {
- start = 0;
- }
-
- const char* response_line = start > 0 ?
- response_range :
- response_normal;
- rv = WriteAll(response_line, strlen(response_line));
- if (NS_FAILED(rv)) { Shutdown(); return NS_OK; }
-
- // Buffer used for reading from the input stream and writing to
- // the output stream. The buffer size should be big enough for the
- // HTTP response headers sent below. A static_assert ensures
- // this where the buffer is used.
- const int buffer_size = 32768;
- auto b = MakeUnique<char[]>(buffer_size);
-
- // If we know the length of the resource, send a Content-Length header.
- int64_t contentlength = resource->GetLength() - start;
- if (contentlength > 0) {
- static_assert (buffer_size > 1024,
- "buffer_size must be large enough "
- "to hold response headers");
- snprintf(b.get(), buffer_size, "Content-Length: %" PRId64 "\r\n", contentlength);
- rv = WriteAll(b.get(), strlen(b.get()));
- if (NS_FAILED(rv)) { Shutdown(); return NS_OK; }
- }
-
- // If the request was a byte range request, respond with a Content-Range
- // header which details the extent of the data returned.
- if (start > 0) {
- static_assert (buffer_size > 1024,
- "buffer_size must be large enough "
- "to hold response headers");
- snprintf(b.get(), buffer_size, "Content-Range: "
- "bytes %" PRId64 "-%" PRId64 "/%" PRId64 "\r\n",
- start, resource->GetLength() - 1, resource->GetLength());
- rv = WriteAll(b.get(), strlen(b.get()));
- if (NS_FAILED(rv)) { Shutdown(); return NS_OK; }
- }
-
- rv = WriteAll(response_end, strlen(response_end));
- if (NS_FAILED(rv)) { Shutdown(); return NS_OK; }
-
- rv = mOutput->Flush();
- if (NS_FAILED(rv)) { Shutdown(); return NS_OK; }
-
- // Read data from media resource
- uint32_t bytesRead = 0; // Number of bytes read/written to streams
- rv = resource->ReadAt(start, b.get(), buffer_size, &bytesRead);
- while (NS_SUCCEEDED(rv) && bytesRead != 0) {
- // Keep track of what we think the starting position for the next read
- // is. This is used in subsequent ReadAt calls to ensure we are reading
- // from the correct offset in the case where another thread is reading
-    // from the same MediaResource.
- start += bytesRead;
-
- // Write data obtained from media resource to output stream
- rv = WriteAll(b.get(), bytesRead);
- if (NS_FAILED (rv)) break;
-
- rv = resource->ReadAt(start, b.get(), 32768, &bytesRead);
- }
-
- Shutdown();
- return NS_OK;
-}
-
-void
-ServeResourceEvent::Shutdown()
-{
- // Cleanup resources and exit.
- mInput->Close();
- mOutput->Close();
-
-  // To shut down the current thread we need to first exit this event.
- // The Shutdown event below is posted to the main thread to do this.
- nsCOMPtr<nsIRunnable> event = new ShutdownThreadEvent(NS_GetCurrentThread());
- NS_DispatchToMainThread(event);
-}
-
-/*
- This is the listener attached to the server socket. When an HTTP
- request is made by the client the OnSocketAccepted method is
- called. This method will spawn a thread to process the request.
-  The thread receives a single event which parses the HTTP request
-  and forwards the data from the MediaResource to the output stream
-  of the request.
-
- The MediaResource used for providing the request data is obtained
- from the AndroidMediaResourceServer that created this listener, using the
- URL the client requested.
-*/
-class ResourceSocketListener : public nsIServerSocketListener
-{
-public:
- // The AndroidMediaResourceServer used to look up the MediaResource
- // on requests.
- RefPtr<AndroidMediaResourceServer> mServer;
-
- NS_DECL_THREADSAFE_ISUPPORTS
- NS_DECL_NSISERVERSOCKETLISTENER
-
- ResourceSocketListener(AndroidMediaResourceServer* aServer) :
- mServer(aServer)
- {
- }
-
-private:
- virtual ~ResourceSocketListener() { }
-};
-
-NS_IMPL_ISUPPORTS(ResourceSocketListener, nsIServerSocketListener)
-
-NS_IMETHODIMP
-ResourceSocketListener::OnSocketAccepted(nsIServerSocket* aServ,
- nsISocketTransport* aTrans)
-{
- nsCOMPtr<nsIInputStream> input;
- nsCOMPtr<nsIOutputStream> output;
- nsresult rv;
-
- rv = aTrans->OpenInputStream(nsITransport::OPEN_BLOCKING, 0, 0, getter_AddRefs(input));
- if (NS_FAILED(rv)) return rv;
-
- rv = aTrans->OpenOutputStream(nsITransport::OPEN_BLOCKING, 0, 0, getter_AddRefs(output));
- if (NS_FAILED(rv)) return rv;
-
- nsCOMPtr<nsIThread> thread;
- rv = NS_NewThread(getter_AddRefs(thread));
- if (NS_FAILED(rv)) return rv;
-
- nsCOMPtr<nsIRunnable> event = new ServeResourceEvent(input.get(), output.get(), mServer);
- return thread->Dispatch(event, NS_DISPATCH_NORMAL);
-}
-
-NS_IMETHODIMP
-ResourceSocketListener::OnStopListening(nsIServerSocket* aServ, nsresult aStatus)
-{
- return NS_OK;
-}
-
-AndroidMediaResourceServer::AndroidMediaResourceServer() :
- mMutex("AndroidMediaResourceServer")
-{
-}
-
-NS_IMETHODIMP
-AndroidMediaResourceServer::Run()
-{
- MOZ_DIAGNOSTIC_ASSERT(NS_IsMainThread());
- MutexAutoLock lock(mMutex);
-
- nsresult rv;
- mSocket = do_CreateInstance(NS_SERVERSOCKET_CONTRACTID, &rv);
- if (NS_FAILED(rv)) return rv;
-
- rv = mSocket->InitSpecialConnection(-1,
- nsIServerSocket::LoopbackOnly
- | nsIServerSocket::KeepWhenOffline,
- -1);
- if (NS_FAILED(rv)) return rv;
-
- rv = mSocket->AsyncListen(new ResourceSocketListener(this));
- if (NS_FAILED(rv)) return rv;
-
- return NS_OK;
-}
-
-/* static */
-already_AddRefed<AndroidMediaResourceServer>
-AndroidMediaResourceServer::Start()
-{
- MOZ_ASSERT(NS_IsMainThread());
- RefPtr<AndroidMediaResourceServer> server = new AndroidMediaResourceServer();
- server->Run();
- return server.forget();
-}
-
-void
-AndroidMediaResourceServer::Stop()
-{
- MutexAutoLock lock(mMutex);
- mSocket->Close();
- mSocket = nullptr;
-}
-
-nsresult
-AndroidMediaResourceServer::AppendRandomPath(nsCString& aUrl)
-{
-  // Use a cryptographic-quality PRNG to generate raw random bytes
-  // and convert them to a base64 string for use as a URL path. This
- // is based on code from nsExternalAppHandler::SetUpTempFile.
- nsresult rv;
- nsAutoCString salt;
- rv = GenerateRandomPathName(salt, 16);
- if (NS_FAILED(rv)) return rv;
- aUrl += "/";
- aUrl += salt;
- return NS_OK;
-}
-
-nsresult
-AndroidMediaResourceServer::AddResource(mozilla::MediaResource* aResource, nsCString& aUrl)
-{
- nsCString url = GetURLPrefix();
- nsresult rv = AppendRandomPath(url);
- if (NS_FAILED (rv)) return rv;
-
- {
- MutexAutoLock lock(mMutex);
-
- // Adding a resource URL that already exists is considered an error.
- if (mResources.find(url) != mResources.end()) return NS_ERROR_FAILURE;
- mResources[url] = aResource;
- }
-
- aUrl = url;
-
- return NS_OK;
-}
-
-void
-AndroidMediaResourceServer::RemoveResource(nsCString const& aUrl)
-{
- MutexAutoLock lock(mMutex);
- mResources.erase(aUrl);
-}
-
-nsCString
-AndroidMediaResourceServer::GetURLPrefix()
-{
- MutexAutoLock lock(mMutex);
-
- int32_t port = 0;
- nsresult rv = mSocket->GetPort(&port);
- if (NS_FAILED (rv) || port < 0) {
- return nsCString("");
- }
-
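-  // Build the prefix from the loopback address and the port that was
-  // dynamically assigned when the server socket was initialized.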
- char buffer[256];
- snprintf(buffer, sizeof(buffer), "http://127.0.0.1:%d", port >= 0 ? port : 0);
- return nsCString(buffer);
-}
-
-already_AddRefed<MediaResource>
-AndroidMediaResourceServer::GetResource(nsCString const& aUrl)
-{
- MutexAutoLock lock(mMutex);
- ResourceMap::const_iterator it = mResources.find(aUrl);
- if (it == mResources.end()) return nullptr;
-
- RefPtr<MediaResource> resource = it->second;
- return resource.forget();
-}
diff --git a/dom/media/android/AndroidMediaResourceServer.h b/dom/media/android/AndroidMediaResourceServer.h
deleted file mode 100644
index 68200f9c0..000000000
--- a/dom/media/android/AndroidMediaResourceServer.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this file,
- * You can obtain one at http://mozilla.org/MPL/2.0/. */
-#if !defined(AndroidMediaResourceServer_h_)
-#define AndroidMediaResourceServer_h_
-
-#include <map>
-#include "nsIServerSocket.h"
-#include "MediaResource.h"
-
-namespace mozilla {
-
-class MediaResource;
-
-/*
- AndroidMediaResourceServer instantiates a socket server that understands
- HTTP requests for MediaResource instances. The server runs on an
- automatically selected port and MediaResource instances are registered.
- The registration returns a string URL than can be used to fetch the
- resource. That URL contains a randomly generated path to make it
- difficult for other local applications on the device to guess it.
-
- The HTTP protocol is limited in that it supports only what the
- Android DataSource implementation uses to fetch media. It
- understands HTTP GET and byte range requests.
-
- The intent of this class is to be used in Media backends that
- have a system component that does its own network requests. These
- requests are made against this server which then uses standard
- Gecko network requests and media cache usage.
-
- The AndroidMediaResourceServer can be instantiated on any thread and
- its methods are threadsafe - they can be called on any thread.
-  The server socket itself always runs on the main thread; the static
-  Start() method arranges this by synchronously dispatching to the
-  main thread.
-*/
-class AndroidMediaResourceServer : public Runnable
-{
-private:
- // Mutex protecting private members of AndroidMediaResourceServer.
-  // The mutex must be held when accessing any member variable below
-  // this point in the class definition.
- mozilla::Mutex mMutex;
-
- // Server socket used to listen for incoming connections
- nsCOMPtr<nsIServerSocket> mSocket;
-
-  // Mapping from MediaResource URLs to the MediaResource
- // object served at that URL.
- typedef std::map<nsCString,
- RefPtr<mozilla::MediaResource> > ResourceMap;
- ResourceMap mResources;
-
-  // Create an AndroidMediaResourceServer that will listen on an automatically
- // selected port when started. This is private as it should only be
- // called internally from the public 'Start' method.
- AndroidMediaResourceServer();
- NS_IMETHOD Run();
-
-  // Append a random URL path to a string. This is used for creating a
-  // unique URL for a resource, which helps prevent malicious software
- // running on the same machine as the server from guessing the URL
- // and accessing video data.
- nsresult AppendRandomPath(nsCString& aURL);
-
-public:
-  // Create an AndroidMediaResourceServer and start it listening. This call will
- // perform a synchronous request on the main thread.
- static already_AddRefed<AndroidMediaResourceServer> Start();
-
- // Stops the server from listening and accepting further connections.
- void Stop();
-
- // Add a MediaResource to be served by this server. Stores the
- // absolute URL that can be used to access the resource in 'aUrl'.
- nsresult AddResource(mozilla::MediaResource* aResource, nsCString& aUrl);
-
- // Remove a MediaResource so it is no longer served by this server.
- // The URL provided must match exactly that provided by a previous
- // call to "AddResource".
- void RemoveResource(nsCString const& aUrl);
-
- // Returns the prefix for HTTP requests to the server. This plus
-  // the result of AddResource results in an absolute URL.
- nsCString GetURLPrefix();
-
-  // Returns the resource associated with a given URL.
- already_AddRefed<mozilla::MediaResource> GetResource(nsCString const& aUrl);
-};
-
-} // namespace mozilla
-
-#endif
diff --git a/dom/media/android/MPAPI.h b/dom/media/android/MPAPI.h
deleted file mode 100644
index 9b289ca09..000000000
--- a/dom/media/android/MPAPI.h
+++ /dev/null
@@ -1,165 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this file,
- * You can obtain one at http://mozilla.org/MPL/2.0/. */
-#if !defined(MPAPI_h_)
-#define MPAPI_h_
-
-#include <stdint.h>
-
-namespace MPAPI {
-
-enum ColorFormat {
- I420,
- RGB565
-};
-
-/*
- * A callback for the plugin to use to request a buffer owned by Gecko. This can
- * save us a copy or two down the line.
- */
-class BufferCallback {
-public:
- virtual void *operator()(size_t aWidth, size_t aHeight,
- ColorFormat aColorFormat) = 0;
-};
-
-struct VideoPlane {
- VideoPlane() :
- mData(0),
- mStride(0),
- mWidth(0),
- mHeight(0),
- mOffset(0),
- mSkip(0)
- {}
-
- void *mData;
- int32_t mStride;
- int32_t mWidth;
- int32_t mHeight;
- int32_t mOffset;
- int32_t mSkip;
-};
-
-struct VideoFrame {
- int64_t mTimeUs;
- bool mKeyFrame;
- void *mData;
- size_t mSize;
- int32_t mStride;
- int32_t mSliceHeight;
- int32_t mRotation;
- VideoPlane Y;
- VideoPlane Cb;
- VideoPlane Cr;
-
- VideoFrame() :
- mTimeUs(0),
- mKeyFrame(false),
- mData(0),
- mSize(0),
- mStride(0),
- mSliceHeight(0),
- mRotation(0)
- {}
-
- void Set(int64_t aTimeUs, bool aKeyFrame,
- void *aData, size_t aSize, int32_t aStride, int32_t aSliceHeight, int32_t aRotation,
- void *aYData, int32_t aYStride, int32_t aYWidth, int32_t aYHeight, int32_t aYOffset, int32_t aYSkip,
- void *aCbData, int32_t aCbStride, int32_t aCbWidth, int32_t aCbHeight, int32_t aCbOffset, int32_t aCbSkip,
- void *aCrData, int32_t aCrStride, int32_t aCrWidth, int32_t aCrHeight, int32_t aCrOffset, int32_t aCrSkip)
- {
- mTimeUs = aTimeUs;
- mKeyFrame = aKeyFrame;
- mData = aData;
- mSize = aSize;
- mStride = aStride;
- mSliceHeight = aSliceHeight;
- mRotation = aRotation;
- Y.mData = aYData;
- Y.mStride = aYStride;
- Y.mWidth = aYWidth;
- Y.mHeight = aYHeight;
- Y.mOffset = aYOffset;
- Y.mSkip = aYSkip;
- Cb.mData = aCbData;
- Cb.mStride = aCbStride;
- Cb.mWidth = aCbWidth;
- Cb.mHeight = aCbHeight;
- Cb.mOffset = aCbOffset;
- Cb.mSkip = aCbSkip;
- Cr.mData = aCrData;
- Cr.mStride = aCrStride;
- Cr.mWidth = aCrWidth;
- Cr.mHeight = aCrHeight;
- Cr.mOffset = aCrOffset;
- Cr.mSkip = aCrSkip;
- }
-};
-
-struct AudioFrame {
- int64_t mTimeUs;
-  void *mData; // 16-bit interleaved PCM
- size_t mSize; // Size of mData in bytes
- int32_t mAudioChannels;
- int32_t mAudioSampleRate;
-
- AudioFrame() :
- mTimeUs(0),
- mData(0),
- mSize(0),
- mAudioChannels(0),
- mAudioSampleRate(0)
- {
- }
-
- void Set(int64_t aTimeUs,
- void *aData, size_t aSize,
- int32_t aAudioChannels, int32_t aAudioSampleRate)
- {
- mTimeUs = aTimeUs;
- mData = aData;
- mSize = aSize;
- mAudioChannels = aAudioChannels;
- mAudioSampleRate = aAudioSampleRate;
- }
-};
-
-struct Decoder;
-
-struct PluginHost {
- bool (*Read)(Decoder *aDecoder, char *aBuffer, int64_t aOffset, uint32_t aCount, uint32_t* aBytes);
- uint64_t (*GetLength)(Decoder *aDecoder);
- void (*SetMetaDataReadMode)(Decoder *aDecoder);
- void (*SetPlaybackReadMode)(Decoder *aDecoder);
- bool (*GetIntPref)(const char *aPref, int32_t *aResult);
- bool (*GetSystemInfoString)(const char *aKey, char *aResult, size_t aResultLen);
-};
-
-struct Decoder {
- void *mResource;
- void *mPrivate;
-
- Decoder();
-
- void (*GetDuration)(Decoder *aDecoder, int64_t *durationUs);
- void (*GetVideoParameters)(Decoder *aDecoder, int32_t *aWidth, int32_t *aHeight);
- void (*GetAudioParameters)(Decoder *aDecoder, int32_t *aNumChannels, int32_t *aSampleRate);
- bool (*HasVideo)(Decoder *aDecoder);
- bool (*HasAudio)(Decoder *aDecoder);
- bool (*ReadVideo)(Decoder *aDecoder, VideoFrame *aFrame, int64_t aSeekTimeUs, BufferCallback *aBufferCallback);
- bool (*ReadAudio)(Decoder *aDecoder, AudioFrame *aFrame, int64_t aSeekTimeUs);
- void (*DestroyDecoder)(Decoder *);
-};
-
-struct Manifest {
- bool (*CanDecode)(const char *aMimeChars, size_t aMimeLen, const char* const**aCodecs);
- bool (*CreateDecoder)(PluginHost *aPluginHost, Decoder *aDecoder,
- const char *aMimeChars, size_t aMimeLen);
-};
-
-}
-
-#endif
diff --git a/dom/media/android/moz.build b/dom/media/android/moz.build
deleted file mode 100644
index 3ad43cd50..000000000
--- a/dom/media/android/moz.build
+++ /dev/null
@@ -1,27 +0,0 @@
-# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
-# vim: set filetype=python:
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this
-# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-EXPORTS += [
- 'AndroidMediaDecoder.h',
- 'AndroidMediaPluginHost.h',
- 'AndroidMediaReader.h',
- 'AndroidMediaResourceServer.h',
- 'MPAPI.h',
-]
-
-UNIFIED_SOURCES += [
- 'AndroidMediaDecoder.cpp',
- 'AndroidMediaPluginHost.cpp',
- 'AndroidMediaReader.cpp',
- 'AndroidMediaResourceServer.cpp',
-]
-
-LOCAL_INCLUDES += [
- '/dom/base',
- '/dom/html',
-]
-
-FINAL_LIBRARY = 'xul'
diff --git a/dom/media/directshow/AudioSinkFilter.cpp b/dom/media/directshow/AudioSinkFilter.cpp
deleted file mode 100644
index 9f23c0e00..000000000
--- a/dom/media/directshow/AudioSinkFilter.cpp
+++ /dev/null
@@ -1,285 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include "SampleSink.h"
-#include "AudioSinkFilter.h"
-#include "AudioSinkInputPin.h"
-#include "VideoUtils.h"
-#include "mozilla/Logging.h"
-
-
-#include <initguid.h>
-#include <wmsdkidl.h>
-
-#define DELETE_RESET(p) { delete (p) ; (p) = nullptr ;}
-
-DEFINE_GUID(CLSID_MozAudioSinkFilter, 0x1872d8c8, 0xea8d, 0x4c34, 0xae, 0x96, 0x69, 0xde,
- 0xf1, 0x33, 0x7b, 0x33);
-
-using namespace mozilla::media;
-
-namespace mozilla {
-
-static LazyLogModule gDirectShowLog("DirectShowDecoder");
-#define LOG(...) MOZ_LOG(gDirectShowLog, mozilla::LogLevel::Debug, (__VA_ARGS__))
-
-AudioSinkFilter::AudioSinkFilter(const wchar_t* aObjectName, HRESULT* aOutResult)
- : BaseFilter(aObjectName, CLSID_MozAudioSinkFilter),
- mFilterCritSec("AudioSinkFilter::mFilterCritSec")
-{
- (*aOutResult) = S_OK;
- mInputPin = new AudioSinkInputPin(L"AudioSinkInputPin",
- this,
- &mFilterCritSec,
- aOutResult);
-}
-
-AudioSinkFilter::~AudioSinkFilter()
-{
-}
-
-int
-AudioSinkFilter::GetPinCount()
-{
- return 1;
-}
-
-BasePin*
-AudioSinkFilter::GetPin(int aIndex)
-{
- CriticalSectionAutoEnter lockFilter(mFilterCritSec);
- return (aIndex == 0) ? static_cast<BasePin*>(mInputPin) : nullptr;
-}
-
-HRESULT
-AudioSinkFilter::Pause()
-{
- CriticalSectionAutoEnter lockFilter(mFilterCritSec);
- if (mState == State_Stopped) {
- // Change the state, THEN activate the input pin.
- mState = State_Paused;
- if (mInputPin && mInputPin->IsConnected()) {
- mInputPin->Active();
- }
- } else if (mState == State_Running) {
- mState = State_Paused;
- }
- return S_OK;
-}
-
-HRESULT
-AudioSinkFilter::Stop()
-{
- CriticalSectionAutoEnter lockFilter(mFilterCritSec);
- mState = State_Stopped;
- if (mInputPin) {
- mInputPin->Inactive();
- }
-
- GetSampleSink()->Flush();
-
- return S_OK;
-}
-
-HRESULT
-AudioSinkFilter::Run(REFERENCE_TIME tStart)
-{
- LOG("AudioSinkFilter::Run(%lld) [%4.2lf]",
- RefTimeToUsecs(tStart),
- double(RefTimeToUsecs(tStart)) / USECS_PER_S);
- return media::BaseFilter::Run(tStart);
-}
-
-HRESULT
-AudioSinkFilter::GetClassID( OUT CLSID * pCLSID )
-{
- (* pCLSID) = CLSID_MozAudioSinkFilter;
- return S_OK;
-}
-
-HRESULT
-AudioSinkFilter::QueryInterface(REFIID aIId, void **aInterface)
-{
- if (aIId == IID_IMediaSeeking) {
- *aInterface = static_cast<IMediaSeeking*>(this);
- AddRef();
- return S_OK;
- }
- return mozilla::media::BaseFilter::QueryInterface(aIId, aInterface);
-}
-
-ULONG
-AudioSinkFilter::AddRef()
-{
- return ::InterlockedIncrement(&mRefCnt);
-}
-
-ULONG
-AudioSinkFilter::Release()
-{
- unsigned long newRefCnt = ::InterlockedDecrement(&mRefCnt);
- if (!newRefCnt) {
- delete this;
- }
- return newRefCnt;
-}
-
-SampleSink*
-AudioSinkFilter::GetSampleSink()
-{
- return mInputPin->GetSampleSink();
-}
-
-
-// IMediaSeeking implementation.
-//
-// Calls to IMediaSeeking are forwarded to the output pin that the
-// AudioSinkInputPin is connected to, i.e. upstream towards the parser and
-// source filters, which actually implement seeking.
-#define ENSURE_CONNECTED_PIN_SEEKING \
- if (!mInputPin) { \
- return E_NOTIMPL; \
- } \
- RefPtr<IMediaSeeking> pinSeeking = mInputPin->GetConnectedPinSeeking(); \
- if (!pinSeeking) { \
- return E_NOTIMPL; \
- }
-
-HRESULT
-AudioSinkFilter::GetCapabilities(DWORD* aCapabilities)
-{
- ENSURE_CONNECTED_PIN_SEEKING
- return pinSeeking->GetCapabilities(aCapabilities);
-}
-
-HRESULT
-AudioSinkFilter::CheckCapabilities(DWORD* aCapabilities)
-{
- ENSURE_CONNECTED_PIN_SEEKING
- return pinSeeking->CheckCapabilities(aCapabilities);
-}
-
-HRESULT
-AudioSinkFilter::IsFormatSupported(const GUID* aFormat)
-{
- ENSURE_CONNECTED_PIN_SEEKING
- return pinSeeking->IsFormatSupported(aFormat);
-}
-
-HRESULT
-AudioSinkFilter::QueryPreferredFormat(GUID* aFormat)
-{
- ENSURE_CONNECTED_PIN_SEEKING
- return pinSeeking->QueryPreferredFormat(aFormat);
-}
-
-HRESULT
-AudioSinkFilter::GetTimeFormat(GUID* aFormat)
-{
- ENSURE_CONNECTED_PIN_SEEKING
- return pinSeeking->GetTimeFormat(aFormat);
-}
-
-HRESULT
-AudioSinkFilter::IsUsingTimeFormat(const GUID* aFormat)
-{
- ENSURE_CONNECTED_PIN_SEEKING
- return pinSeeking->IsUsingTimeFormat(aFormat);
-}
-
-HRESULT
-AudioSinkFilter::SetTimeFormat(const GUID* aFormat)
-{
- ENSURE_CONNECTED_PIN_SEEKING
- return pinSeeking->SetTimeFormat(aFormat);
-}
-
-HRESULT
-AudioSinkFilter::GetDuration(LONGLONG* aDuration)
-{
- ENSURE_CONNECTED_PIN_SEEKING
- return pinSeeking->GetDuration(aDuration);
-}
-
-HRESULT
-AudioSinkFilter::GetStopPosition(LONGLONG* aStop)
-{
- ENSURE_CONNECTED_PIN_SEEKING
- return pinSeeking->GetStopPosition(aStop);
-}
-
-HRESULT
-AudioSinkFilter::GetCurrentPosition(LONGLONG* aCurrent)
-{
- ENSURE_CONNECTED_PIN_SEEKING
- return pinSeeking->GetCurrentPosition(aCurrent);
-}
-
-HRESULT
-AudioSinkFilter::ConvertTimeFormat(LONGLONG* aTarget,
- const GUID* aTargetFormat,
- LONGLONG aSource,
- const GUID* aSourceFormat)
-{
- ENSURE_CONNECTED_PIN_SEEKING
- return pinSeeking->ConvertTimeFormat(aTarget,
- aTargetFormat,
- aSource,
- aSourceFormat);
-}
-
-HRESULT
-AudioSinkFilter::SetPositions(LONGLONG* aCurrent,
- DWORD aCurrentFlags,
- LONGLONG* aStop,
- DWORD aStopFlags)
-{
- ENSURE_CONNECTED_PIN_SEEKING
- return pinSeeking->SetPositions(aCurrent,
- aCurrentFlags,
- aStop,
- aStopFlags);
-}
-
-HRESULT
-AudioSinkFilter::GetPositions(LONGLONG* aCurrent,
- LONGLONG* aStop)
-{
- ENSURE_CONNECTED_PIN_SEEKING
- return pinSeeking->GetPositions(aCurrent, aStop);
-}
-
-HRESULT
-AudioSinkFilter::GetAvailable(LONGLONG* aEarliest,
- LONGLONG* aLatest)
-{
- ENSURE_CONNECTED_PIN_SEEKING
- return pinSeeking->GetAvailable(aEarliest, aLatest);
-}
-
-HRESULT
-AudioSinkFilter::SetRate(double aRate)
-{
- ENSURE_CONNECTED_PIN_SEEKING
- return pinSeeking->SetRate(aRate);
-}
-
-HRESULT
-AudioSinkFilter::GetRate(double* aRate)
-{
- ENSURE_CONNECTED_PIN_SEEKING
- return pinSeeking->GetRate(aRate);
-}
-
-HRESULT
-AudioSinkFilter::GetPreroll(LONGLONG* aPreroll)
-{
- ENSURE_CONNECTED_PIN_SEEKING
- return pinSeeking->GetPreroll(aPreroll);
-}
-
-} // namespace mozilla
-
diff --git a/dom/media/directshow/AudioSinkFilter.h b/dom/media/directshow/AudioSinkFilter.h
deleted file mode 100644
index 85abdfccf..000000000
--- a/dom/media/directshow/AudioSinkFilter.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#if !defined(AudioSinkFilter_h_)
-#define AudioSinkFilter_h_
-
-#include "BaseFilter.h"
-#include "DirectShowUtils.h"
-#include "nsAutoPtr.h"
-#include "mozilla/RefPtr.h"
-
-namespace mozilla {
-
-class AudioSinkInputPin;
-class SampleSink;
-
-// Filter that acts as the end of the graph. Audio samples input into
-// this filter block the calling thread, and the calling thread is
-// unblocked when the decode thread extracts the sample. The samples
-// input into this filter are stored in the SampleSink, where the blocking
-// is implemented. The input pin owns the SampleSink.
-class AudioSinkFilter: public mozilla::media::BaseFilter,
- public IMediaSeeking
-{
-
-public:
- AudioSinkFilter(const wchar_t* aObjectName, HRESULT* aOutResult);
- virtual ~AudioSinkFilter();
-
- // Gets the input pin's sample sink.
- SampleSink* GetSampleSink();
-
- // IUnknown implementation.
- STDMETHODIMP QueryInterface(REFIID aIId, void **aInterface);
- STDMETHODIMP_(ULONG) AddRef();
- STDMETHODIMP_(ULONG) Release();
-
- // --------------------------------------------------------------------
- // CBaseFilter methods
- int GetPinCount ();
- mozilla::media::BasePin* GetPin ( IN int Index);
- STDMETHODIMP Pause ();
- STDMETHODIMP Stop ();
- STDMETHODIMP GetClassID ( OUT CLSID * pCLSID);
- STDMETHODIMP Run(REFERENCE_TIME tStart);
- // IMediaSeeking Methods...
-
- // We defer to SourceFilter, but we must expose the interface on
- // the output pins. Seeking commands come upstream from the renderers,
- // but they must be actioned at the source filters.
- STDMETHODIMP GetCapabilities(DWORD* aCapabilities);
- STDMETHODIMP CheckCapabilities(DWORD* aCapabilities);
- STDMETHODIMP IsFormatSupported(const GUID* aFormat);
- STDMETHODIMP QueryPreferredFormat(GUID* aFormat);
- STDMETHODIMP GetTimeFormat(GUID* aFormat);
- STDMETHODIMP IsUsingTimeFormat(const GUID* aFormat);
- STDMETHODIMP SetTimeFormat(const GUID* aFormat);
- STDMETHODIMP GetDuration(LONGLONG* pDuration);
- STDMETHODIMP GetStopPosition(LONGLONG* pStop);
- STDMETHODIMP GetCurrentPosition(LONGLONG* aCurrent);
- STDMETHODIMP ConvertTimeFormat(LONGLONG* aTarget,
- const GUID* aTargetFormat,
- LONGLONG aSource,
- const GUID* aSourceFormat);
- STDMETHODIMP SetPositions(LONGLONG* aCurrent,
- DWORD aCurrentFlags,
- LONGLONG* aStop,
- DWORD aStopFlags);
- STDMETHODIMP GetPositions(LONGLONG* aCurrent,
- LONGLONG* aStop);
- STDMETHODIMP GetAvailable(LONGLONG* aEarliest,
- LONGLONG* aLatest);
- STDMETHODIMP SetRate(double aRate);
- STDMETHODIMP GetRate(double* aRate);
- STDMETHODIMP GetPreroll(LONGLONG* aPreroll);
-
- // --------------------------------------------------------------------
- // class factory calls this
- static IUnknown * CreateInstance (IN LPUNKNOWN punk, OUT HRESULT * phr);
-
-private:
- CriticalSection mFilterCritSec;
-
- // Note: The input pin defers its refcounting to the sink filter, so when
- // the input pin is addrefed, what actually happens is the sink filter is
- // addrefed.
- nsAutoPtr<AudioSinkInputPin> mInputPin;
-};
-
-} // namespace mozilla
-
-#endif // AudioSinkFilter_h_
diff --git a/dom/media/directshow/AudioSinkInputPin.cpp b/dom/media/directshow/AudioSinkInputPin.cpp
deleted file mode 100644
index 85a6e3da3..000000000
--- a/dom/media/directshow/AudioSinkInputPin.cpp
+++ /dev/null
@@ -1,195 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include "AudioSinkInputPin.h"
-#include "AudioSinkFilter.h"
-#include "SampleSink.h"
-#include "mozilla/Logging.h"
-
-#include <wmsdkidl.h>
-
-using namespace mozilla::media;
-
-namespace mozilla {
-
-static LazyLogModule gDirectShowLog("DirectShowDecoder");
-#define LOG(...) MOZ_LOG(gDirectShowLog, mozilla::LogLevel::Debug, (__VA_ARGS__))
-
-AudioSinkInputPin::AudioSinkInputPin(wchar_t* aObjectName,
- AudioSinkFilter* aFilter,
- mozilla::CriticalSection* aLock,
- HRESULT* aOutResult)
- : BaseInputPin(aObjectName, aFilter, aLock, aOutResult, aObjectName),
- mSegmentStartTime(0)
-{
- MOZ_COUNT_CTOR(AudioSinkInputPin);
- mSampleSink = new SampleSink();
-}
-
-AudioSinkInputPin::~AudioSinkInputPin()
-{
- MOZ_COUNT_DTOR(AudioSinkInputPin);
-}
-
-HRESULT
-AudioSinkInputPin::GetMediaType(int aPosition, MediaType* aOutMediaType)
-{
- NS_ENSURE_TRUE(aPosition >= 0, E_INVALIDARG);
- NS_ENSURE_TRUE(aOutMediaType, E_POINTER);
-
- if (aPosition > 0) {
- return S_FALSE;
- }
-
- // Note: We set output as PCM, as IEEE_FLOAT only works when using the
- // MP3 decoder as an MFT, and we can't do that while using DirectShow.
- aOutMediaType->SetType(&MEDIATYPE_Audio);
- aOutMediaType->SetSubtype(&MEDIASUBTYPE_PCM);
- aOutMediaType->SetType(&FORMAT_WaveFormatEx);
- aOutMediaType->SetTemporalCompression(FALSE);
-
- return S_OK;
-}
-
-HRESULT
-AudioSinkInputPin::CheckMediaType(const MediaType* aMediaType)
-{
- if (!aMediaType) {
- return E_INVALIDARG;
- }
-
- GUID majorType = *aMediaType->Type();
- if (majorType != MEDIATYPE_Audio && majorType != WMMEDIATYPE_Audio) {
- return E_INVALIDARG;
- }
-
- if (*aMediaType->Subtype() != MEDIASUBTYPE_PCM) {
- return E_INVALIDARG;
- }
-
- if (*aMediaType->FormatType() != FORMAT_WaveFormatEx) {
- return E_INVALIDARG;
- }
-
- // We accept the media type, stash its layout format!
- WAVEFORMATEX* wfx = (WAVEFORMATEX*)(aMediaType->pbFormat);
- GetSampleSink()->SetAudioFormat(wfx);
-
- return S_OK;
-}
-
-AudioSinkFilter*
-AudioSinkInputPin::GetAudioSinkFilter()
-{
- return reinterpret_cast<AudioSinkFilter*>(mFilter);
-}
-
-SampleSink*
-AudioSinkInputPin::GetSampleSink()
-{
- return mSampleSink;
-}
-
-HRESULT
-AudioSinkInputPin::SetAbsoluteMediaTime(IMediaSample* aSample)
-{
- HRESULT hr;
- REFERENCE_TIME start = 0, end = 0;
- hr = aSample->GetTime(&start, &end);
- NS_ENSURE_TRUE(SUCCEEDED(hr), E_FAIL);
- {
- CriticalSectionAutoEnter lock(*mLock);
- start += mSegmentStartTime;
- end += mSegmentStartTime;
- }
- hr = aSample->SetMediaTime(&start, &end);
- NS_ENSURE_TRUE(SUCCEEDED(hr), E_FAIL);
- return S_OK;
-}
-
-HRESULT
-AudioSinkInputPin::Receive(IMediaSample* aSample )
-{
- HRESULT hr;
- NS_ENSURE_TRUE(aSample, E_POINTER);
-
- hr = BaseInputPin::Receive(aSample);
- if (SUCCEEDED(hr) && hr != S_FALSE) { // S_FALSE == flushing
-    // Store the sample's timestamp, adjusted for seeking/segments, in its
-    // "media time" attribute. When we seek, DirectShow starts a new
-    // "segment", and starts labeling samples from time=0 again, so we need
-    // to correct for this to get the actual timestamps after seeking.
- hr = SetAbsoluteMediaTime(aSample);
- NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
- hr = GetSampleSink()->Receive(aSample);
- NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
- }
- return S_OK;
-}
-
-already_AddRefed<IMediaSeeking>
-AudioSinkInputPin::GetConnectedPinSeeking()
-{
- RefPtr<IPin> peer = GetConnected();
- if (!peer)
- return nullptr;
- RefPtr<IMediaSeeking> seeking;
- peer->QueryInterface(static_cast<IMediaSeeking**>(getter_AddRefs(seeking)));
- return seeking.forget();
-}
-
-HRESULT
-AudioSinkInputPin::BeginFlush()
-{
- HRESULT hr = media::BaseInputPin::BeginFlush();
- NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
-
- GetSampleSink()->Flush();
-
- return S_OK;
-}
-
-HRESULT
-AudioSinkInputPin::EndFlush()
-{
- HRESULT hr = media::BaseInputPin::EndFlush();
- NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
-
- // Reset the EOS flag, so that if we're called after a seek we still work.
- GetSampleSink()->Reset();
-
- return S_OK;
-}
-
-HRESULT
-AudioSinkInputPin::EndOfStream(void)
-{
- HRESULT hr = media::BaseInputPin::EndOfStream();
- if (FAILED(hr) || hr == S_FALSE) {
-    // Pin is still flushing.
- return hr;
- }
- GetSampleSink()->SetEOS();
-
- return S_OK;
-}
-
-
-HRESULT
-AudioSinkInputPin::NewSegment(REFERENCE_TIME tStart,
- REFERENCE_TIME tStop,
- double dRate)
-{
- CriticalSectionAutoEnter lock(*mLock);
- // Record the start time of the new segment, so that we can store the
-  // correct absolute timestamp in the "media time" of each incoming sample.
- mSegmentStartTime = tStart;
- return S_OK;
-}
-
-} // namespace mozilla
-
diff --git a/dom/media/directshow/AudioSinkInputPin.h b/dom/media/directshow/AudioSinkInputPin.h
deleted file mode 100644
index 80503c641..000000000
--- a/dom/media/directshow/AudioSinkInputPin.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#if !defined(AudioSinkInputPin_h_)
-#define AudioSinkInputPin_h_
-
-#include "BaseInputPin.h"
-#include "DirectShowUtils.h"
-#include "mozilla/RefPtr.h"
-#include "nsAutoPtr.h"
-
-namespace mozilla {
-
-namespace media {
- class MediaType;
-}
-
-class AudioSinkFilter;
-class SampleSink;
-
-
-// Input pin for capturing audio output of a DirectShow filter graph.
-// This is the input pin for the AudioSinkFilter.
-class AudioSinkInputPin: public mozilla::media::BaseInputPin
-{
-public:
- AudioSinkInputPin(wchar_t* aObjectName,
- AudioSinkFilter* aFilter,
- mozilla::CriticalSection* aLock,
- HRESULT* aOutResult);
- virtual ~AudioSinkInputPin();
-
- HRESULT GetMediaType (IN int iPos, OUT mozilla::media::MediaType * pmt);
- HRESULT CheckMediaType (IN const mozilla::media::MediaType * pmt);
- STDMETHODIMP Receive (IN IMediaSample *);
- STDMETHODIMP BeginFlush() override;
- STDMETHODIMP EndFlush() override;
-
-  // Called when we start decoding a new segment, which happens directly after
- // a seek. This captures the segment's start time. Samples decoded by the
- // MP3 decoder have their timestamps offset from the segment start time.
- // Storing the segment start time enables us to set each sample's MediaTime
- // as an offset in the stream relative to the start of the stream, rather
- // than the start of the segment, i.e. its absolute time in the stream.
- STDMETHODIMP NewSegment(REFERENCE_TIME tStart,
- REFERENCE_TIME tStop,
- double dRate) override;
-
- STDMETHODIMP EndOfStream() override;
-
- // Returns the IMediaSeeking interface of the connected output pin.
- // We forward seeking requests upstream from the sink to the source
- // filters.
- already_AddRefed<IMediaSeeking> GetConnectedPinSeeking();
-
- SampleSink* GetSampleSink();
-
-private:
- AudioSinkFilter* GetAudioSinkFilter();
-
- // Sets the media time on the media sample, relative to the segment
- // start time.
- HRESULT SetAbsoluteMediaTime(IMediaSample* aSample);
-
- nsAutoPtr<SampleSink> mSampleSink;
-
- // Synchronized by the filter lock; BaseInputPin::mLock.
- REFERENCE_TIME mSegmentStartTime;
-};
-
-} // namespace mozilla
-
-#endif // AudioSinkInputPin_h_
diff --git a/dom/media/directshow/DirectShowDecoder.cpp b/dom/media/directshow/DirectShowDecoder.cpp
deleted file mode 100644
index da68b4daa..000000000
--- a/dom/media/directshow/DirectShowDecoder.cpp
+++ /dev/null
@@ -1,65 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include "DirectShowDecoder.h"
-#include "DirectShowReader.h"
-#include "DirectShowUtils.h"
-#include "MediaDecoderStateMachine.h"
-#include "mozilla/Preferences.h"
-#include "mozilla/WindowsVersion.h"
-
-namespace mozilla {
-
-MediaDecoderStateMachine* DirectShowDecoder::CreateStateMachine()
-{
- return new MediaDecoderStateMachine(this, new DirectShowReader(this));
-}
-
-/* static */
-bool
-DirectShowDecoder::GetSupportedCodecs(const nsACString& aType,
- char const *const ** aCodecList)
-{
- if (!IsEnabled()) {
- return false;
- }
-
- static char const *const mp3AudioCodecs[] = {
- "mp3",
- nullptr
- };
- if (aType.EqualsASCII("audio/mpeg") ||
- aType.EqualsASCII("audio/mp3")) {
- if (aCodecList) {
- *aCodecList = mp3AudioCodecs;
- }
- return true;
- }
-
- return false;
-}
-
-/* static */
-bool
-DirectShowDecoder::IsEnabled()
-{
- return CanDecodeMP3UsingDirectShow() &&
- Preferences::GetBool("media.directshow.enabled");
-}
-
-DirectShowDecoder::DirectShowDecoder(MediaDecoderOwner* aOwner)
- : MediaDecoder(aOwner)
-{
- MOZ_COUNT_CTOR(DirectShowDecoder);
-}
-
-DirectShowDecoder::~DirectShowDecoder()
-{
- MOZ_COUNT_DTOR(DirectShowDecoder);
-}
-
-} // namespace mozilla
-
diff --git a/dom/media/directshow/DirectShowDecoder.h b/dom/media/directshow/DirectShowDecoder.h
deleted file mode 100644
index c4d371fbf..000000000
--- a/dom/media/directshow/DirectShowDecoder.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#if !defined(DirectShowDecoder_h_)
-#define DirectShowDecoder_h_
-
-#include "MediaDecoder.h"
-
-namespace mozilla {
-
-// Decoder that uses DirectShow to play back MP3 files only.
-class DirectShowDecoder : public MediaDecoder
-{
-public:
-
- explicit DirectShowDecoder(MediaDecoderOwner* aOwner);
- virtual ~DirectShowDecoder();
-
- MediaDecoder* Clone(MediaDecoderOwner* aOwner) override {
- if (!IsEnabled()) {
- return nullptr;
- }
- return new DirectShowDecoder(aOwner);
- }
-
- MediaDecoderStateMachine* CreateStateMachine() override;
-
- // Returns true if aType is a MIME type that we render with the
- // DirectShow backend. If aCodecList is non-null,
- // it is filled with a (static const) null-terminated list of strings
- // denoting the codecs we'll play back. Note that playback is strictly
- // limited to MP3 only.
- static bool GetSupportedCodecs(const nsACString& aType,
- char const *const ** aCodecList);
-
- // Returns true if the DirectShow backend is preffed on.
- static bool IsEnabled();
-};
-
-} // namespace mozilla
-
-#endif
diff --git a/dom/media/directshow/DirectShowReader.cpp b/dom/media/directshow/DirectShowReader.cpp
deleted file mode 100644
index cacf6f8de..000000000
--- a/dom/media/directshow/DirectShowReader.cpp
+++ /dev/null
@@ -1,360 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim: set ts=8 sts=2 et sw=2 tw=80: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include "DirectShowReader.h"
-#include "MediaDecoderReader.h"
-#include "mozilla/RefPtr.h"
-#include "DirectShowUtils.h"
-#include "AudioSinkFilter.h"
-#include "SourceFilter.h"
-#include "SampleSink.h"
-#include "VideoUtils.h"
-
-using namespace mozilla::media;
-
-namespace mozilla {
-
-// Windows XP's MP3 decoder filter. This is available on XP only, on Vista
-// and later we can use the DMO Wrapper filter and MP3 decoder DMO.
-const GUID DirectShowReader::CLSID_MPEG_LAYER_3_DECODER_FILTER =
-{ 0x38BE3000, 0xDBF4, 0x11D0, {0x86, 0x0E, 0x00, 0xA0, 0x24, 0xCF, 0xEF, 0x6D} };
-
-
-static LazyLogModule gDirectShowLog("DirectShowDecoder");
-#define LOG(...) MOZ_LOG(gDirectShowLog, mozilla::LogLevel::Debug, (__VA_ARGS__))
-
-DirectShowReader::DirectShowReader(AbstractMediaDecoder* aDecoder)
- : MediaDecoderReader(aDecoder),
- mMP3FrameParser(aDecoder->GetResource()->GetLength()),
-#ifdef DIRECTSHOW_REGISTER_GRAPH
- mRotRegister(0),
-#endif
- mNumChannels(0),
- mAudioRate(0),
- mBytesPerSample(0)
-{
- MOZ_ASSERT(NS_IsMainThread(), "Must be on main thread.");
- MOZ_COUNT_CTOR(DirectShowReader);
-}
-
-DirectShowReader::~DirectShowReader()
-{
- MOZ_ASSERT(NS_IsMainThread(), "Must be on main thread.");
- MOZ_COUNT_DTOR(DirectShowReader);
-#ifdef DIRECTSHOW_REGISTER_GRAPH
- if (mRotRegister) {
- RemoveGraphFromRunningObjectTable(mRotRegister);
- }
-#endif
-}
-
-// Try to parse the MP3 stream to make sure this is indeed an MP3, get the
-// estimated duration of the stream, and find the offset of the actual MP3
-// frames in the stream, as DirectShow doesn't like large ID3 sections.
-static nsresult
-ParseMP3Headers(MP3FrameParser *aParser, MediaResource *aResource)
-{
- const uint32_t MAX_READ_SIZE = 4096;
-
- uint64_t offset = 0;
- while (aParser->NeedsData() && !aParser->ParsedHeaders()) {
- uint32_t bytesRead;
- char buffer[MAX_READ_SIZE];
- nsresult rv = aResource->ReadAt(offset, buffer,
- MAX_READ_SIZE, &bytesRead);
- NS_ENSURE_SUCCESS(rv, rv);
-
- if (!bytesRead) {
- // End of stream.
- return NS_ERROR_FAILURE;
- }
-
- aParser->Parse(reinterpret_cast<uint8_t*>(buffer), bytesRead, offset);
- offset += bytesRead;
- }
-
- return aParser->IsMP3() ? NS_OK : NS_ERROR_FAILURE;
-}
-
-nsresult
-DirectShowReader::ReadMetadata(MediaInfo* aInfo,
- MetadataTags** aTags)
-{
- MOZ_ASSERT(OnTaskQueue());
- HRESULT hr;
- nsresult rv;
-
- // Create the filter graph, reference it by the GraphBuilder interface,
- // to make graph building more convenient.
- hr = CoCreateInstance(CLSID_FilterGraph,
- nullptr,
- CLSCTX_INPROC_SERVER,
- IID_IGraphBuilder,
- reinterpret_cast<void**>(static_cast<IGraphBuilder**>(getter_AddRefs(mGraph))));
- NS_ENSURE_TRUE(SUCCEEDED(hr) && mGraph, NS_ERROR_FAILURE);
-
- rv = ParseMP3Headers(&mMP3FrameParser, mDecoder->GetResource());
- NS_ENSURE_SUCCESS(rv, rv);
-
- #ifdef DIRECTSHOW_REGISTER_GRAPH
- hr = AddGraphToRunningObjectTable(mGraph, &mRotRegister);
- NS_ENSURE_TRUE(SUCCEEDED(hr), NS_ERROR_FAILURE);
- #endif
-
- // Extract the interface pointers we'll need from the filter graph.
- hr = mGraph->QueryInterface(static_cast<IMediaControl**>(getter_AddRefs(mControl)));
- NS_ENSURE_TRUE(SUCCEEDED(hr) && mControl, NS_ERROR_FAILURE);
-
- hr = mGraph->QueryInterface(static_cast<IMediaSeeking**>(getter_AddRefs(mMediaSeeking)));
- NS_ENSURE_TRUE(SUCCEEDED(hr) && mMediaSeeking, NS_ERROR_FAILURE);
-
- // Build the graph. Create the filters we need, and connect them. We
- // build the entire graph ourselves to prevent other decoders installed
- // on the system from being created and used.
-
- // Our source filter wraps the MediaResource.
- mSourceFilter = new SourceFilter(MEDIATYPE_Stream, MEDIASUBTYPE_MPEG1Audio);
- NS_ENSURE_TRUE(mSourceFilter, NS_ERROR_FAILURE);
-
- rv = mSourceFilter->Init(mDecoder->GetResource(), mMP3FrameParser.GetMP3Offset());
- NS_ENSURE_SUCCESS(rv, rv);
-
- hr = mGraph->AddFilter(mSourceFilter, L"MozillaDirectShowSource");
- NS_ENSURE_TRUE(SUCCEEDED(hr), NS_ERROR_FAILURE);
-
- // The MPEG demuxer.
- RefPtr<IBaseFilter> demuxer;
- hr = CreateAndAddFilter(mGraph,
- CLSID_MPEG1Splitter,
- L"MPEG1Splitter",
- getter_AddRefs(demuxer));
- NS_ENSURE_TRUE(SUCCEEDED(hr), NS_ERROR_FAILURE);
-
- // Platform MP3 decoder.
- RefPtr<IBaseFilter> decoder;
- // Firstly try to create the MP3 decoder filter that ships with WinXP
- // directly. This filter doesn't normally exist on later versions of
- // Windows.
- hr = CreateAndAddFilter(mGraph,
- CLSID_MPEG_LAYER_3_DECODER_FILTER,
- L"MPEG Layer 3 Decoder",
- getter_AddRefs(decoder));
- if (FAILED(hr)) {
- // Failed to create MP3 decoder filter. Try to instantiate
- // the MP3 decoder DMO.
- hr = AddMP3DMOWrapperFilter(mGraph, getter_AddRefs(decoder));
- NS_ENSURE_TRUE(SUCCEEDED(hr), NS_ERROR_FAILURE);
- }
-
- // The sink captures audio samples and inserts them into our pipeline.
- static const wchar_t* AudioSinkFilterName = L"MozAudioSinkFilter";
- mAudioSinkFilter = new AudioSinkFilter(AudioSinkFilterName, &hr);
- NS_ENSURE_TRUE(mAudioSinkFilter && SUCCEEDED(hr), NS_ERROR_FAILURE);
- hr = mGraph->AddFilter(mAudioSinkFilter, AudioSinkFilterName);
- NS_ENSURE_TRUE(SUCCEEDED(hr), NS_ERROR_FAILURE);
-
- // Join the filters.
- hr = ConnectFilters(mGraph, mSourceFilter, demuxer);
- NS_ENSURE_TRUE(SUCCEEDED(hr), NS_ERROR_FAILURE);
-
- hr = ConnectFilters(mGraph, demuxer, decoder);
- NS_ENSURE_TRUE(SUCCEEDED(hr), NS_ERROR_FAILURE);
-
- hr = ConnectFilters(mGraph, decoder, mAudioSinkFilter);
- NS_ENSURE_TRUE(SUCCEEDED(hr), NS_ERROR_FAILURE);
-
- WAVEFORMATEX format;
- mAudioSinkFilter->GetSampleSink()->GetAudioFormat(&format);
- NS_ENSURE_TRUE(format.wFormatTag == WAVE_FORMAT_PCM, NS_ERROR_FAILURE);
-
- mInfo.mAudio.mChannels = mNumChannels = format.nChannels;
- mInfo.mAudio.mRate = mAudioRate = format.nSamplesPerSec;
- mInfo.mAudio.mBitDepth = format.wBitsPerSample;
- mBytesPerSample = format.wBitsPerSample / 8;
-
- // Begin decoding!
- hr = mControl->Run();
- NS_ENSURE_TRUE(SUCCEEDED(hr), NS_ERROR_FAILURE);
-
- DWORD seekCaps = 0;
- hr = mMediaSeeking->GetCapabilities(&seekCaps);
- mInfo.mMediaSeekable = SUCCEEDED(hr) && (AM_SEEKING_CanSeekAbsolute & seekCaps);
-
- int64_t duration = mMP3FrameParser.GetDuration();
- if (SUCCEEDED(hr)) {
- mInfo.mMetadataDuration.emplace(TimeUnit::FromMicroseconds(duration));
- }
-
- LOG("Successfully initialized DirectShow MP3 decoder.");
- LOG("Channels=%u Hz=%u duration=%lld bytesPerSample=%d",
- mInfo.mAudio.mChannels,
- mInfo.mAudio.mRate,
- RefTimeToUsecs(duration),
- mBytesPerSample);
-
- *aInfo = mInfo;
- // Note: The SourceFilter strips ID3v2 tags out of the stream.
- *aTags = nullptr;
-
- return NS_OK;
-}
-
-inline float
-UnsignedByteToAudioSample(uint8_t aValue)
-{
- return aValue * (2.0f / UINT8_MAX) - 1.0f;
-}
-
-bool
-DirectShowReader::Finish(HRESULT aStatus)
-{
- MOZ_ASSERT(OnTaskQueue());
-
- LOG("DirectShowReader::Finish(0x%x)", aStatus);
- // Notify the filter graph of end of stream.
- RefPtr<IMediaEventSink> eventSink;
- HRESULT hr = mGraph->QueryInterface(static_cast<IMediaEventSink**>(getter_AddRefs(eventSink)));
- if (SUCCEEDED(hr) && eventSink) {
- eventSink->Notify(EC_COMPLETE, aStatus, 0);
- }
- return false;
-}
-
-class DirectShowCopy
-{
-public:
- DirectShowCopy(uint8_t *aSource, uint32_t aBytesPerSample,
- uint32_t aSamples, uint32_t aChannels)
- : mSource(aSource)
- , mBytesPerSample(aBytesPerSample)
- , mSamples(aSamples)
- , mChannels(aChannels)
- , mNextSample(0)
- { }
-
- uint32_t operator()(AudioDataValue *aBuffer, uint32_t aSamples)
- {
- uint32_t maxSamples = std::min(aSamples, mSamples - mNextSample);
- uint32_t frames = maxSamples / mChannels;
- size_t byteOffset = mNextSample * mBytesPerSample;
- if (mBytesPerSample == 1) {
- for (uint32_t i = 0; i < maxSamples; ++i) {
- uint8_t *sample = mSource + byteOffset;
- aBuffer[i] = UnsignedByteToAudioSample(*sample);
- byteOffset += mBytesPerSample;
- }
- } else if (mBytesPerSample == 2) {
- for (uint32_t i = 0; i < maxSamples; ++i) {
- int16_t *sample = reinterpret_cast<int16_t *>(mSource + byteOffset);
- aBuffer[i] = AudioSampleToFloat(*sample);
- byteOffset += mBytesPerSample;
- }
- }
- mNextSample += maxSamples;
- return frames;
- }
-
-private:
- uint8_t * const mSource;
- const uint32_t mBytesPerSample;
- const uint32_t mSamples;
- const uint32_t mChannels;
- uint32_t mNextSample;
-};
-
-bool
-DirectShowReader::DecodeAudioData()
-{
- MOZ_ASSERT(OnTaskQueue());
- HRESULT hr;
-
- SampleSink* sink = mAudioSinkFilter->GetSampleSink();
- if (sink->AtEOS()) {
- // End of stream.
- return Finish(S_OK);
- }
-
- // Get the next chunk of audio samples. This blocks until the sample
- // arrives, or an error occurs (e.g. the stream is shut down).
- RefPtr<IMediaSample> sample;
- hr = sink->Extract(sample);
- if (FAILED(hr) || hr == S_FALSE) {
- return Finish(hr);
- }
-
- int64_t start = 0, end = 0;
- sample->GetMediaTime(&start, &end);
- LOG("DirectShowReader::DecodeAudioData [%4.2lf-%4.2lf]",
- RefTimeToSeconds(start),
- RefTimeToSeconds(end));
-
- LONG length = sample->GetActualDataLength();
- LONG numSamples = length / mBytesPerSample;
- LONG numFrames = length / mBytesPerSample / mNumChannels;
-
- BYTE* data = nullptr;
- hr = sample->GetPointer(&data);
- NS_ENSURE_TRUE(SUCCEEDED(hr), Finish(hr));
-
- mAudioCompactor.Push(mDecoder->GetResource()->Tell(),
- RefTimeToUsecs(start),
- mInfo.mAudio.mRate,
- numFrames,
- mNumChannels,
- DirectShowCopy(reinterpret_cast<uint8_t *>(data),
- mBytesPerSample,
- numSamples,
- mNumChannels));
- return true;
-}
-
-bool
-DirectShowReader::DecodeVideoFrame(bool &aKeyframeSkip,
- int64_t aTimeThreshold)
-{
- MOZ_ASSERT(OnTaskQueue());
- return false;
-}
-
-RefPtr<MediaDecoderReader::SeekPromise>
-DirectShowReader::Seek(SeekTarget aTarget, int64_t aEndTime)
-{
- nsresult res = SeekInternal(aTarget.GetTime().ToMicroseconds());
- if (NS_FAILED(res)) {
- return SeekPromise::CreateAndReject(res, __func__);
- } else {
- return SeekPromise::CreateAndResolve(aTarget.GetTime(), __func__);
- }
-}
-
-nsresult
-DirectShowReader::SeekInternal(int64_t aTargetUs)
-{
- HRESULT hr;
- MOZ_ASSERT(OnTaskQueue());
-
- LOG("DirectShowReader::Seek() target=%lld", aTargetUs);
-
- hr = mControl->Pause();
- NS_ENSURE_TRUE(SUCCEEDED(hr), NS_ERROR_FAILURE);
-
- nsresult rv = ResetDecode();
- NS_ENSURE_SUCCESS(rv, rv);
-
- LONGLONG seekPosition = UsecsToRefTime(aTargetUs);
- hr = mMediaSeeking->SetPositions(&seekPosition,
- AM_SEEKING_AbsolutePositioning,
- nullptr,
- AM_SEEKING_NoPositioning);
- NS_ENSURE_TRUE(SUCCEEDED(hr), NS_ERROR_FAILURE);
-
- hr = mControl->Run();
- NS_ENSURE_TRUE(SUCCEEDED(hr), NS_ERROR_FAILURE);
-
- return NS_OK;
-}
-
-} // namespace mozilla
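DirectShowCopy above converts 8-bit unsigned and 16-bit signed PCM into float samples. A small standalone check of the 8-bit mapping (0 maps to -1.0, 128 to roughly 0, 255 to +1.0), reusing the same formula as UnsignedByteToAudioSample(); this is an illustrative sketch, not part of the tree:

#include <cstdint>
#include <cstdio>

static float UnsignedByteToAudioSample(uint8_t aValue)
{
  return aValue * (2.0f / UINT8_MAX) - 1.0f;
}

int main()
{
  const uint8_t values[] = { 0, 128, 255 };
  for (uint8_t v : values) {
    // 128 maps to ~0.0039 rather than exactly 0, since 255 is odd.
    std::printf("%3u -> % .4f\n", unsigned(v), UnsignedByteToAudioSample(v));
  }
  return 0;
}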
diff --git a/dom/media/directshow/DirectShowReader.h b/dom/media/directshow/DirectShowReader.h
deleted file mode 100644
index 881b27c28..000000000
--- a/dom/media/directshow/DirectShowReader.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#if !defined(DirectShowReader_h_)
-#define DirectShowReader_h_
-
-#include "windows.h" // HRESULT, DWORD
-#include "MediaDecoderReader.h"
-#include "MediaResource.h"
-#include "mozilla/RefPtr.h"
-#include "MP3FrameParser.h"
-
-// Add the graph to the Running Object Table so that we can connect
-// to this graph with GraphEdit/GraphStudio. Note: you must
-// also regsvr32 proppage.dll from the Windows SDK.
-// See: http://msdn.microsoft.com/en-us/library/ms787252(VS.85).aspx
-// #define DIRECTSHOW_REGISTER_GRAPH
-
-struct IGraphBuilder;
-struct IMediaControl;
-struct IMediaSeeking;
-
-namespace mozilla {
-
-class AudioSinkFilter;
-class SourceFilter;
-
-// Decoder backend for decoding MP3 using DirectShow. DirectShow operates as
-// a filter graph. The basic design of the DirectShowReader is that we have
-// a SourceFilter that wraps the MediaResource that connects to the
-// MP3 decoder filter. The MP3 decoder filter "pulls" data as it requires it
-// downstream on its own thread. When the MP3 decoder has produced a block of
-// decoded samples, its thread calls downstream into our AudioSinkFilter,
-// passing the decoded buffer in. The AudioSinkFilter inserts the samples into
-// a SampleSink object. The SampleSink blocks the MP3 decoder's thread until
-// the decode thread calls DecodeAudioData(), whereupon the SampleSink
-// releases the decoded samples to the decode thread, and unblocks the MP3
-// decoder's thread. The MP3 decoder can then request more data from the
-// SourceFilter, and decode more data. If the decode thread calls
-// DecodeAudioData() and there's no decoded samples waiting to be extracted
-// in the SampleSink, the SampleSink blocks the decode thread until the MP3
-// decoder produces a decoded sample.
-class DirectShowReader : public MediaDecoderReader
-{
-public:
- DirectShowReader(AbstractMediaDecoder* aDecoder);
-
- virtual ~DirectShowReader();
-
- bool DecodeAudioData() override;
- bool DecodeVideoFrame(bool &aKeyframeSkip,
- int64_t aTimeThreshold) override;
-
- nsresult ReadMetadata(MediaInfo* aInfo,
- MetadataTags** aTags) override;
-
- RefPtr<SeekPromise>
- Seek(SeekTarget aTarget, int64_t aEndTime) override;
-
- static const GUID CLSID_MPEG_LAYER_3_DECODER_FILTER;
-
-private:
- // Notifies the filter graph that playback is complete. aStatus is
- // the code to send to the filter graph. Always returns false, so
- // that we can just "return Finish()" from DecodeAudioData().
- bool Finish(HRESULT aStatus);
-
- nsresult SeekInternal(int64_t aTime);
-
- // DirectShow filter graph, and associated playback and seeking
- // control interfaces.
- RefPtr<IGraphBuilder> mGraph;
- RefPtr<IMediaControl> mControl;
- RefPtr<IMediaSeeking> mMediaSeeking;
-
- // Wraps the MediaResource, and feeds undecoded data into the filter graph.
- RefPtr<SourceFilter> mSourceFilter;
-
- // Sits at the end of the graph, removing decoded samples from the graph.
- // The graph will block while this is blocked, i.e. it will pause decoding.
- RefPtr<AudioSinkFilter> mAudioSinkFilter;
-
- // Some MP3s are variable bitrate, so DirectShow's duration estimate can be
- // based on the wrong bitrate. So we parse the MP3 frames to get a more
- // accurate estimate of the duration.
- MP3FrameParser mMP3FrameParser;
-
-#ifdef DIRECTSHOW_REGISTER_GRAPH
- // Used to add/remove the filter graph to the Running Object Table. You can
- // connect GraphEdit/GraphStudio to the graph to observe and/or debug its
- // topology and state.
- DWORD mRotRegister;
-#endif
-
- // Number of channels in the audio stream.
- uint32_t mNumChannels;
-
- // Samples per second in the audio stream.
- uint32_t mAudioRate;
-
- // Number of bytes per sample. Can be either 1 or 2.
- uint32_t mBytesPerSample;
-};
-
-} // namespace mozilla
-
-#endif
diff --git a/dom/media/directshow/DirectShowUtils.cpp b/dom/media/directshow/DirectShowUtils.cpp
deleted file mode 100644
index b2afa7528..000000000
--- a/dom/media/directshow/DirectShowUtils.cpp
+++ /dev/null
@@ -1,369 +0,0 @@
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include "DirectShowUtils.h"
-#include "dmodshow.h"
-#include "wmcodecdsp.h"
-#include "dmoreg.h"
-#include "mozilla/ArrayUtils.h"
-#include "mozilla/RefPtr.h"
-#include "nsPrintfCString.h"
-
-#define WARN(...) NS_WARNING(nsPrintfCString(__VA_ARGS__).get())
-
-namespace mozilla {
-
-// Create a table which maps GUIDs to a string representation of the GUID.
-// This is useful for debugging purposes, for logging the GUIDs of media types.
-// This is only available when logging is enabled, i.e. not in release builds.
-struct GuidToName {
- const char* name;
- const GUID guid;
-};
-
-#pragma push_macro("OUR_GUID_ENTRY")
-#undef OUR_GUID_ENTRY
-#define OUR_GUID_ENTRY(name, l, w1, w2, b1, b2, b3, b4, b5, b6, b7, b8) \
- { #name, {l, w1, w2, {b1, b2, b3, b4, b5, b6, b7, b8}} },
-
-static const GuidToName GuidToNameTable[] = {
-#include <uuids.h>
-};
-
-#pragma pop_macro("OUR_GUID_ENTRY")
-
-const char*
-GetDirectShowGuidName(const GUID& aGuid)
-{
- const size_t len = ArrayLength(GuidToNameTable);
- for (unsigned i = 0; i < len; i++) {
- if (IsEqualGUID(aGuid, GuidToNameTable[i].guid)) {
- return GuidToNameTable[i].name;
- }
- }
- return "Unknown";
-}
-
-void
-RemoveGraphFromRunningObjectTable(DWORD aRotRegister)
-{
- RefPtr<IRunningObjectTable> runningObjectTable;
- if (SUCCEEDED(GetRunningObjectTable(0, getter_AddRefs(runningObjectTable)))) {
- runningObjectTable->Revoke(aRotRegister);
- }
-}
-
-HRESULT
-AddGraphToRunningObjectTable(IUnknown *aUnkGraph, DWORD *aOutRotRegister)
-{
- HRESULT hr;
-
- RefPtr<IMoniker> moniker;
- RefPtr<IRunningObjectTable> runningObjectTable;
-
- hr = GetRunningObjectTable(0, getter_AddRefs(runningObjectTable));
- NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
-
- const size_t STRING_LENGTH = 256;
- WCHAR wsz[STRING_LENGTH];
-
- StringCchPrintfW(wsz,
- STRING_LENGTH,
- L"FilterGraph %08x pid %08x",
- (DWORD_PTR)aUnkGraph,
- GetCurrentProcessId());
-
- hr = CreateItemMoniker(L"!", wsz, getter_AddRefs(moniker));
- NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
-
- hr = runningObjectTable->Register(ROTFLAGS_REGISTRATIONKEEPSALIVE,
- aUnkGraph,
- moniker,
- aOutRotRegister);
- NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
-
- return S_OK;
-}
-
-const char*
-GetGraphNotifyString(long evCode)
-{
-#define CASE(x) case x: return #x
- switch(evCode) {
- CASE(EC_ACTIVATE); // A video window is being activated or deactivated.
- CASE(EC_BANDWIDTHCHANGE); // Not supported.
- CASE(EC_BUFFERING_DATA); // The graph is buffering data, or has stopped buffering data.
- CASE(EC_BUILT); // Sent by the Video Control when a graph has been built. Not forwarded to applications.
- CASE(EC_CLOCK_CHANGED); // The reference clock has changed.
- CASE(EC_CLOCK_UNSET); // The clock provider was disconnected.
- CASE(EC_CODECAPI_EVENT); // Sent by an encoder to signal an encoding event.
- CASE(EC_COMPLETE); // All data from a particular stream has been rendered.
- CASE(EC_CONTENTPROPERTY_CHANGED); // Not supported.
- CASE(EC_DEVICE_LOST); // A Plug and Play device was removed or has become available again.
- CASE(EC_DISPLAY_CHANGED); // The display mode has changed.
- CASE(EC_END_OF_SEGMENT); // The end of a segment has been reached.
- CASE(EC_EOS_SOON); // Not supported.
- CASE(EC_ERROR_STILLPLAYING); // An asynchronous command to run the graph has failed.
- CASE(EC_ERRORABORT); // An operation was aborted because of an error.
- CASE(EC_ERRORABORTEX); // An operation was aborted because of an error.
- CASE(EC_EXTDEVICE_MODE_CHANGE); // Not supported.
- CASE(EC_FILE_CLOSED); // The source file was closed because of an unexpected event.
- CASE(EC_FULLSCREEN_LOST); // The video renderer is switching out of full-screen mode.
- CASE(EC_GRAPH_CHANGED); // The filter graph has changed.
- CASE(EC_LENGTH_CHANGED); // The length of a source has changed.
- CASE(EC_LOADSTATUS); // Notifies the application of progress when opening a network file.
- CASE(EC_MARKER_HIT); // Not supported.
- CASE(EC_NEED_RESTART); // A filter is requesting that the graph be restarted.
- CASE(EC_NEW_PIN); // Not supported.
- CASE(EC_NOTIFY_WINDOW); // Notifies a filter of the video renderer's window.
- CASE(EC_OLE_EVENT); // A filter is passing a text string to the application.
- CASE(EC_OPENING_FILE); // The graph is opening a file, or has finished opening a file.
- CASE(EC_PALETTE_CHANGED); // The video palette has changed.
- CASE(EC_PAUSED); // A pause request has completed.
- CASE(EC_PLEASE_REOPEN); // The source file has changed.
- CASE(EC_PREPROCESS_COMPLETE); // Sent by the WM ASF Writer filter when it completes the pre-processing for multipass encoding.
- CASE(EC_PROCESSING_LATENCY); // Indicates the amount of time that a component is taking to process each sample.
- CASE(EC_QUALITY_CHANGE); // The graph is dropping samples, for quality control.
- //CASE(EC_RENDER_FINISHED); // Not supported.
- CASE(EC_REPAINT); // A video renderer requires a repaint.
- CASE(EC_SAMPLE_LATENCY); // Specifies how far behind schedule a component is for processing samples.
- //CASE(EC_SAMPLE_NEEDED); // Requests a new input sample from the Enhanced Video Renderer (EVR) filter.
- CASE(EC_SCRUB_TIME); // Specifies the time stamp for the most recent frame step.
- CASE(EC_SEGMENT_STARTED); // A new segment has started.
- CASE(EC_SHUTTING_DOWN); // The filter graph is shutting down, prior to being destroyed.
- CASE(EC_SNDDEV_IN_ERROR); // A device error has occurred in an audio capture filter.
- CASE(EC_SNDDEV_OUT_ERROR); // A device error has occurred in an audio renderer filter.
- CASE(EC_STARVATION); // A filter is not receiving enough data.
- CASE(EC_STATE_CHANGE); // The filter graph has changed state.
- CASE(EC_STATUS); // Contains two arbitrary status strings.
- CASE(EC_STEP_COMPLETE); // A filter performing frame stepping has stepped the specified number of frames.
- CASE(EC_STREAM_CONTROL_STARTED); // A stream-control start command has taken effect.
- CASE(EC_STREAM_CONTROL_STOPPED); // A stream-control stop command has taken effect.
- CASE(EC_STREAM_ERROR_STILLPLAYING); // An error has occurred in a stream. The stream is still playing.
- CASE(EC_STREAM_ERROR_STOPPED); // A stream has stopped because of an error.
- CASE(EC_TIMECODE_AVAILABLE); // Not supported.
- CASE(EC_UNBUILT); // Sent by the Video Control when a graph has been torn down. Not forwarded to applications.
- CASE(EC_USERABORT); // The user has terminated playback.
- CASE(EC_VIDEO_SIZE_CHANGED); // The native video size has changed.
- CASE(EC_VIDEOFRAMEREADY); // A video frame is ready for display.
- CASE(EC_VMR_RECONNECTION_FAILED); // Sent by the VMR-7 and the VMR-9 when it was unable to accept a dynamic format change request from the upstream decoder.
- CASE(EC_VMR_RENDERDEVICE_SET); // Sent when the VMR has selected its rendering mechanism.
- CASE(EC_VMR_SURFACE_FLIPPED); // Sent when the VMR-7's allocator presenter has called the DirectDraw Flip method on the surface being presented.
- CASE(EC_WINDOW_DESTROYED); // The video renderer was destroyed or removed from the graph.
- CASE(EC_WMT_EVENT); // Sent by the WM ASF Reader filter when it reads ASF files protected by digital rights management (DRM).
- CASE(EC_WMT_INDEX_EVENT); // Sent when an application uses the WM ASF Writer to index Windows Media Video files.
- CASE(S_OK); // Success.
- CASE(VFW_S_AUDIO_NOT_RENDERED); // Partial success; the audio was not rendered.
- CASE(VFW_S_DUPLICATE_NAME); // Success; the Filter Graph Manager modified a filter name to avoid duplication.
- CASE(VFW_S_PARTIAL_RENDER); // Partial success; some of the streams in this movie are in an unsupported format.
- CASE(VFW_S_VIDEO_NOT_RENDERED); // Partial success; the video was not rendered.
- CASE(E_ABORT); // Operation aborted.
- CASE(E_OUTOFMEMORY); // Insufficient memory.
- CASE(E_POINTER); // Null pointer argument.
- CASE(VFW_E_CANNOT_CONNECT); // No combination of intermediate filters could be found to make the connection.
- CASE(VFW_E_CANNOT_RENDER); // No combination of filters could be found to render the stream.
- CASE(VFW_E_NO_ACCEPTABLE_TYPES); // There is no common media type between these pins.
- CASE(VFW_E_NOT_IN_GRAPH);
-
- default:
- return "Unknown Code";
- };
-#undef CASE
-}
-
-HRESULT
-CreateAndAddFilter(IGraphBuilder* aGraph,
- REFGUID aFilterClsId,
- LPCWSTR aFilterName,
- IBaseFilter **aOutFilter)
-{
- NS_ENSURE_TRUE(aGraph, E_POINTER);
- NS_ENSURE_TRUE(aOutFilter, E_POINTER);
- HRESULT hr;
-
- RefPtr<IBaseFilter> filter;
- hr = CoCreateInstance(aFilterClsId,
- nullptr,
- CLSCTX_INPROC_SERVER,
- IID_IBaseFilter,
- getter_AddRefs(filter));
- if (FAILED(hr)) {
- // Object probably not available on this system.
- WARN("CoCreateInstance failed, hr=%x", hr);
- return hr;
- }
-
- hr = aGraph->AddFilter(filter, aFilterName);
- if (FAILED(hr)) {
- WARN("AddFilter failed, hr=%x", hr);
- return hr;
- }
-
- filter.forget(aOutFilter);
-
- return S_OK;
-}
-
-HRESULT
-CreateMP3DMOWrapperFilter(IBaseFilter **aOutFilter)
-{
- NS_ENSURE_TRUE(aOutFilter, E_POINTER);
- HRESULT hr;
-
- // Create the wrapper filter.
- RefPtr<IBaseFilter> filter;
- hr = CoCreateInstance(CLSID_DMOWrapperFilter,
- nullptr,
- CLSCTX_INPROC_SERVER,
- IID_IBaseFilter,
- getter_AddRefs(filter));
- if (FAILED(hr)) {
- WARN("CoCreateInstance failed, hr=%x", hr);
- return hr;
- }
-
- // Query for IDMOWrapperFilter.
- RefPtr<IDMOWrapperFilter> dmoWrapper;
- hr = filter->QueryInterface(IID_IDMOWrapperFilter,
- getter_AddRefs(dmoWrapper));
- if (FAILED(hr)) {
- WARN("QueryInterface failed, hr=%x", hr);
- return hr;
- }
-
- hr = dmoWrapper->Init(CLSID_CMP3DecMediaObject, DMOCATEGORY_AUDIO_DECODER);
- if (FAILED(hr)) {
- // Can't instantiate MP3 DMO. It doesn't exist on Windows XP, we're
- // probably hitting that. Don't log warning to console, this is an
- // expected error.
- WARN("dmoWrapper Init failed, hr=%x", hr);
- return hr;
- }
-
- filter.forget(aOutFilter);
-
- return S_OK;
-}
-
-HRESULT
-AddMP3DMOWrapperFilter(IGraphBuilder* aGraph,
- IBaseFilter **aOutFilter)
-{
- NS_ENSURE_TRUE(aGraph, E_POINTER);
- NS_ENSURE_TRUE(aOutFilter, E_POINTER);
- HRESULT hr;
-
- // Create the wrapper filter.
- RefPtr<IBaseFilter> filter;
- hr = CreateMP3DMOWrapperFilter(getter_AddRefs(filter));
- NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
-
- // Add the wrapper filter to graph.
- hr = aGraph->AddFilter(filter, L"MP3 Decoder DMO");
- if (FAILED(hr)) {
- WARN("AddFilter failed, hr=%x", hr);
- return hr;
- }
-
- filter.forget(aOutFilter);
-
- return S_OK;
-}
-
-bool
-CanDecodeMP3UsingDirectShow()
-{
- RefPtr<IBaseFilter> filter;
-
- // Can we create the MP3 demuxer filter?
- if (FAILED(CoCreateInstance(CLSID_MPEG1Splitter,
- nullptr,
- CLSCTX_INPROC_SERVER,
- IID_IBaseFilter,
- getter_AddRefs(filter)))) {
- return false;
- }
-
- // Can we create either the WinXP MP3 decoder filter or the MP3 DMO decoder?
- if (FAILED(CoCreateInstance(DirectShowReader::CLSID_MPEG_LAYER_3_DECODER_FILTER,
- nullptr,
- CLSCTX_INPROC_SERVER,
- IID_IBaseFilter,
- getter_AddRefs(filter))) &&
- FAILED(CreateMP3DMOWrapperFilter(getter_AddRefs(filter)))) {
- return false;
- }
-
- // Else, we can create all of the components we need. Assume
- // DirectShow is going to work...
- return true;
-}
-
-// Match a pin by pin direction and connection state.
-HRESULT
-MatchUnconnectedPin(IPin* aPin,
- PIN_DIRECTION aPinDir,
- bool *aOutMatches)
-{
- NS_ENSURE_TRUE(aPin, E_POINTER);
- NS_ENSURE_TRUE(aOutMatches, E_POINTER);
-
- // Ensure the pin is unconnected.
- RefPtr<IPin> peer;
- HRESULT hr = aPin->ConnectedTo(getter_AddRefs(peer));
- if (hr != VFW_E_NOT_CONNECTED) {
- *aOutMatches = false;
- return hr;
- }
-
- // Ensure the pin is of the specified direction.
- PIN_DIRECTION pinDir;
- hr = aPin->QueryDirection(&pinDir);
- NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
-
- *aOutMatches = (pinDir == aPinDir);
- return S_OK;
-}
-
-// Return the first unconnected input pin or output pin.
-already_AddRefed<IPin>
-GetUnconnectedPin(IBaseFilter* aFilter, PIN_DIRECTION aPinDir)
-{
- RefPtr<IEnumPins> enumPins;
-
- HRESULT hr = aFilter->EnumPins(getter_AddRefs(enumPins));
- NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
-
- // Test each pin to see if it matches the direction we're looking for.
- RefPtr<IPin> pin;
- while (S_OK == enumPins->Next(1, getter_AddRefs(pin), nullptr)) {
- bool matches = FALSE;
- if (SUCCEEDED(MatchUnconnectedPin(pin, aPinDir, &matches)) &&
- matches) {
- return pin.forget();
- }
- }
-
- return nullptr;
-}
-
-HRESULT
-ConnectFilters(IGraphBuilder* aGraph,
- IBaseFilter* aOutputFilter,
- IBaseFilter* aInputFilter)
-{
- RefPtr<IPin> output = GetUnconnectedPin(aOutputFilter, PINDIR_OUTPUT);
- NS_ENSURE_TRUE(output, E_FAIL);
-
- RefPtr<IPin> input = GetUnconnectedPin(aInputFilter, PINDIR_INPUT);
- NS_ENSURE_TRUE(input, E_FAIL);
-
- return aGraph->Connect(output, input);
-}
-
-} // namespace mozilla
-
-// avoid redefined macro in unified build
-#undef WARN
diff --git a/dom/media/directshow/DirectShowUtils.h b/dom/media/directshow/DirectShowUtils.h
deleted file mode 100644
index 3bbc122fc..000000000
--- a/dom/media/directshow/DirectShowUtils.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#ifndef _DirectShowUtils_h_
-#define _DirectShowUtils_h_
-
-#include <stdint.h>
-#include "dshow.h"
-
-// XXXbz windowsx.h defines GetFirstChild, GetNextSibling, and
-// GetPrevSibling as macros, apparently... Eeevil. We have functions
-// with those names on some classes, so undef them.
-#undef GetFirstChild
-#undef GetNextSibling
-#undef GetPrevSibling
-
-#include "DShowTools.h"
-#include "mozilla/Logging.h"
-
-namespace mozilla {
-
-// Win32 "Event" wrapper. Must be paired with a CriticalSection to create a
-// Java-style "monitor".
-class Signal {
-public:
-
- Signal(CriticalSection* aLock)
- : mLock(aLock)
- {
- CriticalSectionAutoEnter lock(*mLock);
- mEvent = CreateEvent(nullptr, FALSE, FALSE, nullptr);
- }
-
- ~Signal() {
- CriticalSectionAutoEnter lock(*mLock);
- CloseHandle(mEvent);
- }
-
- // Lock must be held.
- void Notify() {
- SetEvent(mEvent);
- }
-
- // Lock must be held. Check the wait condition before waiting!
- HRESULT Wait() {
- mLock->Leave();
- DWORD result = WaitForSingleObject(mEvent, INFINITE);
- mLock->Enter();
- return result == WAIT_OBJECT_0 ? S_OK : E_FAIL;
- }
-
-private:
- CriticalSection* mLock;
- HANDLE mEvent;
-};
-
-HRESULT
-AddGraphToRunningObjectTable(IUnknown *aUnkGraph, DWORD *aOutRotRegister);
-
-void
-RemoveGraphFromRunningObjectTable(DWORD aRotRegister);
-
-const char*
-GetGraphNotifyString(long evCode);
-
-// Creates a filter and adds it to a graph.
-HRESULT
-CreateAndAddFilter(IGraphBuilder* aGraph,
- REFGUID aFilterClsId,
- LPCWSTR aFilterName,
- IBaseFilter **aOutFilter);
-
-HRESULT
-AddMP3DMOWrapperFilter(IGraphBuilder* aGraph,
- IBaseFilter **aOutFilter);
-
-// Connects the output pin on aOutputFilter to an input pin on
-// aInputFilter, in aGraph.
-HRESULT
-ConnectFilters(IGraphBuilder* aGraph,
- IBaseFilter* aOutputFilter,
- IBaseFilter* aInputFilter);
-
-HRESULT
-MatchUnconnectedPin(IPin* aPin,
- PIN_DIRECTION aPinDir,
- bool *aOutMatches);
-
-// Converts from microseconds to DirectShow "Reference Time"
-// (hundreds of nanoseconds).
-inline int64_t
-UsecsToRefTime(const int64_t aUsecs)
-{
- return aUsecs * 10;
-}
-
-// Converts from DirectShow "Reference Time" (hundreds of nanoseconds)
-// to microseconds.
-inline int64_t
-RefTimeToUsecs(const int64_t hRefTime)
-{
- return hRefTime / 10;
-}
-
-// Converts from DirectShow "Reference Time" (hundreds of nanoseconds)
-// to seconds.
-inline double
-RefTimeToSeconds(const REFERENCE_TIME aRefTime)
-{
- return double(aRefTime) / 10000000;
-}
-
-const char*
-GetDirectShowGuidName(const GUID& aGuid);
-
-// Returns true if we can instantiate the MP3 demuxer and decoder filters.
-// Use this to detect whether MP3 support is installed.
-bool
-CanDecodeMP3UsingDirectShow();
-
-} // namespace mozilla
-
-#endif
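The conversion helpers above hinge on DirectShow reference time being in 100-nanosecond units, so microseconds scale by a factor of 10 and seconds by 10,000,000. A quick standalone sanity check of those factors, re-declaring the helpers locally since this snippet is not part of the tree:

#include <cassert>
#include <cstdint>

static int64_t UsecsToRefTime(int64_t aUsecs) { return aUsecs * 10; }
static int64_t RefTimeToUsecs(int64_t aRefTime) { return aRefTime / 10; }
static double RefTimeToSeconds(int64_t aRefTime) { return double(aRefTime) / 10000000; }

int main()
{
  assert(UsecsToRefTime(1000000) == 10000000);   // 1 s == 10,000,000 ref units.
  assert(RefTimeToUsecs(10000000) == 1000000);
  assert(RefTimeToSeconds(10000000) == 1.0);
  return 0;
}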
diff --git a/dom/media/directshow/SampleSink.cpp b/dom/media/directshow/SampleSink.cpp
deleted file mode 100644
index fa5dc8d19..000000000
--- a/dom/media/directshow/SampleSink.cpp
+++ /dev/null
@@ -1,159 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include "SampleSink.h"
-#include "AudioSinkFilter.h"
-#include "AudioSinkInputPin.h"
-#include "VideoUtils.h"
-#include "mozilla/Logging.h"
-
-using namespace mozilla::media;
-
-namespace mozilla {
-
-static LazyLogModule gDirectShowLog("DirectShowDecoder");
-#define LOG(...) MOZ_LOG(gDirectShowLog, mozilla::LogLevel::Debug, (__VA_ARGS__))
-
-SampleSink::SampleSink()
- : mMonitor("SampleSink"),
- mIsFlushing(false),
- mAtEOS(false)
-{
- MOZ_COUNT_CTOR(SampleSink);
-}
-
-SampleSink::~SampleSink()
-{
- MOZ_COUNT_DTOR(SampleSink);
-}
-
-void
-SampleSink::SetAudioFormat(const WAVEFORMATEX* aInFormat)
-{
- NS_ENSURE_TRUE(aInFormat, );
- ReentrantMonitorAutoEnter mon(mMonitor);
- memcpy(&mAudioFormat, aInFormat, sizeof(WAVEFORMATEX));
-}
-
-void
-SampleSink::GetAudioFormat(WAVEFORMATEX* aOutFormat)
-{
- MOZ_ASSERT(aOutFormat);
- ReentrantMonitorAutoEnter mon(mMonitor);
- memcpy(aOutFormat, &mAudioFormat, sizeof(WAVEFORMATEX));
-}
-
-HRESULT
-SampleSink::Receive(IMediaSample* aSample)
-{
- ReentrantMonitorAutoEnter mon(mMonitor);
-
- while (true) {
- if (mIsFlushing) {
- return S_FALSE;
- }
- if (!mSample) {
- break;
- }
- if (mAtEOS) {
- return E_UNEXPECTED;
- }
- // Wait until the consumer thread consumes the sample.
- mon.Wait();
- }
-
- if (MOZ_LOG_TEST(gDirectShowLog, LogLevel::Debug)) {
- REFERENCE_TIME start = 0, end = 0;
- HRESULT hr = aSample->GetMediaTime(&start, &end);
- LOG("SampleSink::Receive() [%4.2lf-%4.2lf]",
- (double)RefTimeToUsecs(start) / USECS_PER_S,
- (double)RefTimeToUsecs(end) / USECS_PER_S);
- }
-
- mSample = aSample;
- // Notify the signal, to awaken the consumer thread in WaitForSample()
- // if necessary.
- mon.NotifyAll();
- return S_OK;
-}
-
-HRESULT
-SampleSink::Extract(RefPtr<IMediaSample>& aOutSample)
-{
- ReentrantMonitorAutoEnter mon(mMonitor);
- // Loop until we have a sample, or we should abort.
- while (true) {
- if (mIsFlushing) {
- return S_FALSE;
- }
- if (mSample) {
- break;
- }
- if (mAtEOS) {
- // Order is important here, if we have a sample, we should return it
- // before reporting EOS.
- return E_UNEXPECTED;
- }
- // Wait until the producer thread gives us a sample.
- mon.Wait();
- }
- aOutSample = mSample;
-
- if (MOZ_LOG_TEST(gDirectShowLog, LogLevel::Debug)) {
- int64_t start = 0, end = 0;
- mSample->GetMediaTime(&start, &end);
- LOG("SampleSink::Extract() [%4.2lf-%4.2lf]",
- (double)RefTimeToUsecs(start) / USECS_PER_S,
- (double)RefTimeToUsecs(end) / USECS_PER_S);
- }
-
- mSample = nullptr;
- // Notify the signal, to awaken the producer thread in Receive()
- // if necessary.
- mon.NotifyAll();
- return S_OK;
-}
-
-void
-SampleSink::Flush()
-{
- LOG("SampleSink::Flush()");
- ReentrantMonitorAutoEnter mon(mMonitor);
- mIsFlushing = true;
- mSample = nullptr;
- mon.NotifyAll();
-}
-
-void
-SampleSink::Reset()
-{
- LOG("SampleSink::Reset()");
- ReentrantMonitorAutoEnter mon(mMonitor);
- mIsFlushing = false;
- mAtEOS = false;
-}
-
-void
-SampleSink::SetEOS()
-{
- LOG("SampleSink::SetEOS()");
- ReentrantMonitorAutoEnter mon(mMonitor);
- mAtEOS = true;
- // Notify to unblock any threads waiting for samples in
- // Extract() or Receive(). Now that we're at EOS, no more samples
- // will come!
- mon.NotifyAll();
-}
-
-bool
-SampleSink::AtEOS()
-{
- ReentrantMonitorAutoEnter mon(mMonitor);
- return mAtEOS && !mSample;
-}
-
-} // namespace mozilla
-
diff --git a/dom/media/directshow/SampleSink.h b/dom/media/directshow/SampleSink.h
deleted file mode 100644
index 6a1af9fee..000000000
--- a/dom/media/directshow/SampleSink.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#if !defined(SampleSink_h_)
-#define SampleSink_h_
-
-#include "BaseFilter.h"
-#include "DirectShowUtils.h"
-#include "mozilla/RefPtr.h"
-#include "mozilla/ReentrantMonitor.h"
-
-namespace mozilla {
-
-class SampleSink {
-public:
- SampleSink();
- virtual ~SampleSink();
-
- // Sets the audio format of the incoming samples. The upstream filter
- // calls this. This makes a copy.
- void SetAudioFormat(const WAVEFORMATEX* aInFormat);
-
- // Copies the format of incoming audio samples into *aOutFormat.
- void GetAudioFormat(WAVEFORMATEX* aOutFormat);
-
- // Called when a sample is delivered by the DirectShow graph to the sink.
- // The decode thread retrieves the sample by calling Extract().
- // Blocks if there's already a sample waiting to be consumed by the decode
- // thread.
- HRESULT Receive(IMediaSample* aSample);
-
- // Retrieves a sample from the sample queue, blocking until one becomes
- // available, or until an error occurs. Returns S_FALSE on EOS.
- HRESULT Extract(RefPtr<IMediaSample>& aOutSample);
-
- // Unblocks any threads waiting in Extract() or Receive().
- // Clears mSample, which unblocks the upstream filter.
- void Flush();
-
- // Opens up the sink to receive more samples in Receive().
- // Clears EOS flag.
- void Reset();
-
- // Marks that we've reached the end of stream.
- void SetEOS();
-
- // Returns whether we're at end of stream.
- bool AtEOS();
-
-private:
- // All data in this class is synchronized by mMonitor.
- ReentrantMonitor mMonitor;
- RefPtr<IMediaSample> mSample;
-
- // Format of the audio stream we're receiving.
- WAVEFORMATEX mAudioFormat;
-
- bool mIsFlushing;
- bool mAtEOS;
-};
-
-} // namespace mozilla
-
-#endif // SampleSink_h_
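SampleSink is a single-slot, blocking producer/consumer handoff: Receive() blocks the decoder's thread while a sample is pending, Extract() blocks the reader's thread until one arrives, and Flush()/SetEOS() wake both sides. A portable sketch of the same pattern using standard C++ primitives; the class name and the std::optional slot are assumptions for illustration, not Mozilla's ReentrantMonitor-based code:

#include <condition_variable>
#include <mutex>
#include <optional>

template <typename Sample>
class SingleSlotSink {
public:
  // Producer side: blocks while a previous sample is still unconsumed.
  bool Receive(Sample aSample) {
    std::unique_lock<std::mutex> lock(mMutex);
    mCond.wait(lock, [&] { return mFlushing || mAtEOS || !mSlot; });
    if (mFlushing || mAtEOS) {
      return false;
    }
    mSlot = std::move(aSample);
    mCond.notify_all();  // Wake a consumer blocked in Extract().
    return true;
  }

  // Consumer side: blocks until a sample arrives, a flush, or EOS.
  std::optional<Sample> Extract() {
    std::unique_lock<std::mutex> lock(mMutex);
    mCond.wait(lock, [&] { return mFlushing || mAtEOS || mSlot.has_value(); });
    if (!mSlot) {
      return std::nullopt;  // Flushing, or EOS with nothing queued.
    }
    std::optional<Sample> out = std::move(mSlot);
    mSlot.reset();
    mCond.notify_all();  // Wake a producer blocked in Receive().
    return out;
  }

  void Flush() {
    std::lock_guard<std::mutex> lock(mMutex);
    mFlushing = true;
    mSlot.reset();
    mCond.notify_all();
  }

  void SetEOS() {
    std::lock_guard<std::mutex> lock(mMutex);
    mAtEOS = true;
    mCond.notify_all();
  }

private:
  std::mutex mMutex;
  std::condition_variable mCond;
  std::optional<Sample> mSlot;
  bool mFlushing = false;
  bool mAtEOS = false;
};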
diff --git a/dom/media/directshow/SourceFilter.cpp b/dom/media/directshow/SourceFilter.cpp
deleted file mode 100644
index 4c5a0882c..000000000
--- a/dom/media/directshow/SourceFilter.cpp
+++ /dev/null
@@ -1,683 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this file,
- * You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include "SourceFilter.h"
-#include "MediaResource.h"
-#include "mozilla/RefPtr.h"
-#include "DirectShowUtils.h"
-#include "MP3FrameParser.h"
-#include "mozilla/Logging.h"
-#include <algorithm>
-
-using namespace mozilla::media;
-
-namespace mozilla {
-
-// Define to trace what's on...
-//#define DEBUG_SOURCE_TRACE 1
-
-#if defined (DEBUG_SOURCE_TRACE)
-static LazyLogModule gDirectShowLog("DirectShowDecoder");
-#define DIRECTSHOW_LOG(...) MOZ_LOG(gDirectShowLog, mozilla::LogLevel::Debug, (__VA_ARGS__))
-#else
-#define DIRECTSHOW_LOG(...)
-#endif
-
-static HRESULT
-DoGetInterface(IUnknown* aUnknown, void** aInterface)
-{
- if (!aInterface)
- return E_POINTER;
- *aInterface = aUnknown;
- aUnknown->AddRef();
- return S_OK;
-}
-
-// Stores details of IAsyncReader::Request().
-class ReadRequest {
-public:
-
- ReadRequest(IMediaSample* aSample,
- DWORD_PTR aDwUser,
- uint32_t aOffset,
- uint32_t aCount)
- : mSample(aSample),
- mDwUser(aDwUser),
- mOffset(aOffset),
- mCount(aCount)
- {
- MOZ_COUNT_CTOR(ReadRequest);
- }
-
- ~ReadRequest() {
- MOZ_COUNT_DTOR(ReadRequest);
- }
-
- RefPtr<IMediaSample> mSample;
- DWORD_PTR mDwUser;
- uint32_t mOffset;
- uint32_t mCount;
-};
-
-// A wrapper around a media resource that presents only a partition of the
-// underlying resource to the caller. The partition returned is from
-// an offset to the end of stream, and this object deals with ensuring
-// the offsets and lengths etc are translated from the reduced partition
-// exposed to the caller, to the absolute offsets of the underlying stream.
-class MediaResourcePartition {
-public:
- MediaResourcePartition(MediaResource* aResource,
- int64_t aDataStart)
- : mResource(aResource),
- mDataOffset(aDataStart)
- {}
-
- int64_t GetLength() {
- int64_t len = mResource.GetLength();
- if (len == -1) {
- return len;
- }
- return std::max<int64_t>(0, len - mDataOffset);
- }
- nsresult ReadAt(int64_t aOffset, char* aBuffer,
- uint32_t aCount, uint32_t* aBytes)
- {
- return mResource.ReadAt(aOffset + mDataOffset,
- aBuffer,
- aCount,
- aBytes);
- }
- int64_t GetCachedDataEnd() {
- int64_t tell = mResource.GetResource()->Tell();
- int64_t dataEnd =
- mResource.GetResource()->GetCachedDataEnd(tell) - mDataOffset;
- return dataEnd;
- }
-private:
- // MediaResource from which we read data.
- MediaResourceIndex mResource;
- int64_t mDataOffset;
-};
-
-
-// Output pin for SourceFilter, which implements IAsyncReader, to
-// allow downstream filters to pull/read data from it. Downstream pins
-// register to read data using Request(), and asynchronously wait for the
-// reads to complete using WaitForNext(). They may also synchronously read
-// using SyncRead(). This class is a delegate (tear off) of
-// SourceFilter.
-//
-// We can expose only a segment of the MediaResource to the filter graph.
-// This is used to strip off the ID3v2 tags from the stream, as DirectShow
-// has trouble parsing some headers.
-//
-// Implements:
-// * IAsyncReader
-// * IPin
-// * IQualityControl
-// * IUnknown
-//
-class DECLSPEC_UUID("18e5cfb2-1015-440c-a65c-e63853235894")
-OutputPin : public IAsyncReader,
- public BasePin
-{
-public:
-
- OutputPin(MediaResource* aMediaResource,
- SourceFilter* aParent,
- CriticalSection& aFilterLock,
- int64_t aMP3DataStart);
- virtual ~OutputPin();
-
- // IUnknown
- // Defer to ref counting to BasePin, which defers to owning nsBaseFilter.
- STDMETHODIMP_(ULONG) AddRef() override { return BasePin::AddRef(); }
- STDMETHODIMP_(ULONG) Release() override { return BasePin::Release(); }
- STDMETHODIMP QueryInterface(REFIID iid, void** ppv) override;
-
- // BasePin Overrides.
- // Determines if the pin accepts a specific media type.
- HRESULT CheckMediaType(const MediaType* aMediaType) override;
-
- // Retrieves a preferred media type, by index value.
- HRESULT GetMediaType(int aPosition, MediaType* aMediaType) override;
-
- // Releases the pin from a connection.
- HRESULT BreakConnect(void) override;
-
- // Determines whether a pin connection is suitable.
- HRESULT CheckConnect(IPin* aPin) override;
-
-
- // IAsyncReader overrides
-
- // The RequestAllocator method requests an allocator during the
- // pin connection.
- STDMETHODIMP RequestAllocator(IMemAllocator* aPreferred,
- ALLOCATOR_PROPERTIES* aProps,
- IMemAllocator** aActual) override;
-
- // The Request method queues an asynchronous request for data. Downstream
- // will call WaitForNext() when they want to retrieve the result.
- STDMETHODIMP Request(IMediaSample* aSample, DWORD_PTR aUserData) override;
-
- // The WaitForNext method waits for the next pending read request
- // to complete. This method fails if the graph is flushing.
- // Defers to SyncRead/5.
- STDMETHODIMP WaitForNext(DWORD aTimeout,
- IMediaSample** aSamples,
- DWORD_PTR* aUserData) override;
-
- // The SyncReadAligned method performs a synchronous read. The method
- // blocks until the request is completed. Defers to SyncRead/5. This
- // method does not fail if the graph is flushing.
- STDMETHODIMP SyncReadAligned(IMediaSample* aSample) override;
-
- // The SyncRead method performs a synchronous read. The method blocks
- // until the request is completed. Defers to SyncRead/5. This
- // method does not fail if the graph is flushing.
- STDMETHODIMP SyncRead(LONGLONG aPosition, LONG aLength, BYTE* aBuffer) override;
-
- // The Length method retrieves the total length of the stream.
- STDMETHODIMP Length(LONGLONG* aTotal, LONGLONG* aAvailable) override;
-
- // IPin Overrides
- STDMETHODIMP BeginFlush(void) override;
- STDMETHODIMP EndFlush(void) override;
-
- uint32_t GetAndResetBytesConsumedCount();
-
-private:
-
- // Protects thread-shared data/structures (mFlushCount, mPendingReads).
- // WaitForNext() also waits on this monitor
- CriticalSection& mPinLock;
-
- // Signal used with mPinLock to implement WaitForNext().
- Signal mSignal;
-
- // The filter that owns us. Weak reference, as we're a delegate (tear off).
- SourceFilter* mParentSource;
-
- MediaResourcePartition mResource;
-
- // Counter, inc'd in BeginFlush(), dec'd in EndFlush(). Calls to this can
- // come from multiple threads and can interleave, hence the counter.
- int32_t mFlushCount;
-
- // Number of bytes that have been read from the output pin since the last
- // time GetAndResetBytesConsumedCount() was called.
- uint32_t mBytesConsumed;
-
- // Deque of ReadRequest* for reads that are yet to be serviced.
- // ReadRequest objects are stored on the heap; the popper must delete them.
- nsDeque mPendingReads;
-
- // Flags if the downstream pin has QI'd for IAsyncReader. We refuse
- // connection if they don't query, as it means they're assuming that we're
- // a push filter, and we're not.
- bool mQueriedForAsyncReader;
-
-};
-
-// For mingw __uuidof support
-#ifdef __CRT_UUID_DECL
-}
-__CRT_UUID_DECL(mozilla::OutputPin, 0x18e5cfb2,0x1015,0x440c,0xa6,0x5c,0xe6,0x38,0x53,0x23,0x58,0x94);
-namespace mozilla {
-#endif
-
-OutputPin::OutputPin(MediaResource* aResource,
- SourceFilter* aParent,
- CriticalSection& aFilterLock,
- int64_t aMP3DataStart)
- : BasePin(static_cast<BaseFilter*>(aParent),
- &aFilterLock,
- L"MozillaOutputPin",
- PINDIR_OUTPUT),
- mPinLock(aFilterLock),
- mSignal(&mPinLock),
- mParentSource(aParent),
- mResource(aResource, aMP3DataStart),
- mFlushCount(0),
- mBytesConsumed(0),
- mQueriedForAsyncReader(false)
-{
- MOZ_COUNT_CTOR(OutputPin);
- DIRECTSHOW_LOG("OutputPin::OutputPin()");
-}
-
-OutputPin::~OutputPin()
-{
- MOZ_COUNT_DTOR(OutputPin);
- DIRECTSHOW_LOG("OutputPin::~OutputPin()");
-}
-
-HRESULT
-OutputPin::BreakConnect()
-{
- mQueriedForAsyncReader = false;
- return BasePin::BreakConnect();
-}
-
-STDMETHODIMP
-OutputPin::QueryInterface(REFIID aIId, void** aInterface)
-{
- if (aIId == IID_IAsyncReader) {
- mQueriedForAsyncReader = true;
- return DoGetInterface(static_cast<IAsyncReader*>(this), aInterface);
- }
-
- if (aIId == __uuidof(OutputPin)) {
- AddRef();
- *aInterface = this;
- return S_OK;
- }
-
- return BasePin::QueryInterface(aIId, aInterface);
-}
-
-HRESULT
-OutputPin::CheckConnect(IPin* aPin)
-{
- // Our connection is only suitable if the downstream pin knows
- // that we're asynchronous (i.e. it queried for IAsyncReader).
- return mQueriedForAsyncReader ? S_OK : S_FALSE;
-}
-
-HRESULT
-OutputPin::CheckMediaType(const MediaType* aMediaType)
-{
- const MediaType *myMediaType = mParentSource->GetMediaType();
-
- if (IsEqualGUID(aMediaType->majortype, myMediaType->majortype) &&
- IsEqualGUID(aMediaType->subtype, myMediaType->subtype) &&
- IsEqualGUID(aMediaType->formattype, myMediaType->formattype))
- {
- DIRECTSHOW_LOG("OutputPin::CheckMediaType() Match: major=%s minor=%s TC=%d FSS=%d SS=%u",
- GetDirectShowGuidName(aMediaType->majortype),
- GetDirectShowGuidName(aMediaType->subtype),
- aMediaType->TemporalCompression(),
- aMediaType->bFixedSizeSamples,
- aMediaType->SampleSize());
- return S_OK;
- }
-
- DIRECTSHOW_LOG("OutputPin::CheckMediaType() Failed to match: major=%s minor=%s TC=%d FSS=%d SS=%u",
- GetDirectShowGuidName(aMediaType->majortype),
- GetDirectShowGuidName(aMediaType->subtype),
- aMediaType->TemporalCompression(),
- aMediaType->bFixedSizeSamples,
- aMediaType->SampleSize());
- return S_FALSE;
-}
-
-HRESULT
-OutputPin::GetMediaType(int aPosition, MediaType* aMediaType)
-{
- if (!aMediaType)
- return E_POINTER;
-
- if (aPosition == 0) {
- aMediaType->Assign(mParentSource->GetMediaType());
- return S_OK;
- }
- return VFW_S_NO_MORE_ITEMS;
-}
-
-static inline bool
-IsPowerOf2(int32_t x) {
- return x > 0 && ((-x & x) == x);
-}
-
-STDMETHODIMP
-OutputPin::RequestAllocator(IMemAllocator* aPreferred,
- ALLOCATOR_PROPERTIES* aProps,
- IMemAllocator** aActual)
-{
- // Require the downstream pin to suggest what they want...
- if (!aPreferred) return E_POINTER;
- if (!aProps) return E_POINTER;
- if (!aActual) return E_POINTER;
-
- // We only care about alignment - our allocator will reject anything
- // which isn't power-of-2 aligned, so try a 4-byte aligned allocator.
- ALLOCATOR_PROPERTIES props;
- memcpy(&props, aProps, sizeof(ALLOCATOR_PROPERTIES));
- if (aProps->cbAlign == 0 || !IsPowerOf2(aProps->cbAlign)) {
- props.cbAlign = 4;
- }
-
- // Limit allocator's number of buffers. We know that the media will most
- // likely be bound by network speed, not by decoding speed. We also
- // store the incoming data in a Gecko stream, if we don't limit buffers
- // here we'll end up duplicating a lot of storage. We must have enough
- // space for audio key frames to fit in the first batch of buffers however,
- // else pausing may fail for some downstream decoders.
- if (props.cBuffers > BaseFilter::sMaxNumBuffers) {
- props.cBuffers = BaseFilter::sMaxNumBuffers;
- }
-
- // The allocator properties that are actually used. We don't store
- // this, we need it for SetProperties() below to succeed.
- ALLOCATOR_PROPERTIES actualProps;
- HRESULT hr;
-
- if (aPreferred) {
- // Play nice and prefer the downstream pin's preferred allocator.
- hr = aPreferred->SetProperties(&props, &actualProps);
- if (SUCCEEDED(hr)) {
- aPreferred->AddRef();
- *aActual = aPreferred;
- return S_OK;
- }
- }
-
- // Else the downstream pin's preferred allocator rejected our properties,
-
- // Just create a default allocator. It's highly unlikely that we'll use
- // this anyway, as most parsers insist on using their own allocators.
- RefPtr<IMemAllocator> allocator;
- hr = CoCreateInstance(CLSID_MemoryAllocator,
- 0,
- CLSCTX_INPROC_SERVER,
- IID_IMemAllocator,
- getter_AddRefs(allocator));
- if(FAILED(hr) || (allocator == nullptr)) {
- NS_WARNING("Can't create our own DirectShow allocator.");
- return hr;
- }
-
- // See if we can make it suitable
- hr = allocator->SetProperties(&props, &actualProps);
- if (SUCCEEDED(hr)) {
- // We need to release our refcount on the allocator, and addref
- // it to pass a refcount to the caller - this is a net nothing.
- allocator.forget(aActual);
- return S_OK;
- }
-
- NS_WARNING("Failed to pick an allocator");
- return hr;
-}
-
-STDMETHODIMP
-OutputPin::Request(IMediaSample* aSample, DWORD_PTR aDwUser)
-{
- if (!aSample) return E_FAIL;
-
- CriticalSectionAutoEnter lock(*mLock);
- NS_ASSERTION(!mFlushCount, "Request() while flushing");
-
- if (mFlushCount)
- return VFW_E_WRONG_STATE;
-
- REFERENCE_TIME refStart = 0, refEnd = 0;
- if (FAILED(aSample->GetTime(&refStart, &refEnd))) {
- NS_WARNING("Sample incorrectly timestamped");
- return VFW_E_SAMPLE_TIME_NOT_SET;
- }
-
- // Convert reference time to bytes.
- uint32_t start = (uint32_t)(refStart / 10000000);
- uint32_t end = (uint32_t)(refEnd / 10000000);
-
- uint32_t numBytes = end - start;
-
- ReadRequest* request = new ReadRequest(aSample,
- aDwUser,
- start,
- numBytes);
- // Memory for |request| is free when it's popped from the completed
- // reads list.
-
- // Push this onto the queue of reads to be serviced.
- mPendingReads.Push(request);
-
- // Notify any threads blocked in WaitForNext() which are waiting for mPendingReads
- // to become non-empty.
- mSignal.Notify();
-
- return S_OK;
-}
-
-STDMETHODIMP
-OutputPin::WaitForNext(DWORD aTimeout,
- IMediaSample** aOutSample,
- DWORD_PTR* aOutDwUser)
-{
- NS_ASSERTION(aTimeout == 0 || aTimeout == INFINITE,
- "Oops, we don't handle this!");
-
- *aOutSample = nullptr;
- *aOutDwUser = 0;
-
- LONGLONG offset = 0;
- LONG count = 0;
- BYTE* buf = nullptr;
-
- {
- CriticalSectionAutoEnter lock(*mLock);
-
- // Wait until there's a pending read to service.
- while (aTimeout && mPendingReads.GetSize() == 0 && !mFlushCount) {
- // Note: No need to guard against shutdown-during-wait here, as
- // typically the thread doing the pull will have already called
- // Request(), so we won't Wait() here anyway. SyncRead() will fail
- // on shutdown.
- mSignal.Wait();
- }
-
- nsAutoPtr<ReadRequest> request(reinterpret_cast<ReadRequest*>(mPendingReads.PopFront()));
- if (!request)
- return VFW_E_WRONG_STATE;
-
- *aOutSample = request->mSample;
- *aOutDwUser = request->mDwUser;
-
- offset = request->mOffset;
- count = request->mCount;
- buf = nullptr;
- request->mSample->GetPointer(&buf);
- NS_ASSERTION(buf != nullptr, "Invalid buffer!");
-
- if (mFlushCount) {
- return VFW_E_TIMEOUT;
- }
- }
-
- return SyncRead(offset, count, buf);
-}
-
-STDMETHODIMP
-OutputPin::SyncReadAligned(IMediaSample* aSample)
-{
- {
- // Ignore reads while flushing.
- CriticalSectionAutoEnter lock(*mLock);
- if (mFlushCount) {
- return S_FALSE;
- }
- }
-
- if (!aSample)
- return E_FAIL;
-
- REFERENCE_TIME lStart = 0, lEnd = 0;
- if (FAILED(aSample->GetTime(&lStart, &lEnd))) {
- NS_WARNING("Sample incorrectly timestamped");
- return VFW_E_SAMPLE_TIME_NOT_SET;
- }
-
- // Convert reference time to bytes.
- int32_t start = (int32_t)(lStart / 10000000);
- int32_t end = (int32_t)(lEnd / 10000000);
-
- int32_t numBytes = end - start;
-
- // If the range extends off the end of stream, truncate to the end of stream
- // as per the IAsyncReader specification.
- int64_t streamLength = mResource.GetLength();
- if (streamLength != -1) {
- // We know the exact length of the stream, fail if the requested offset
- // is beyond it.
- if (start > streamLength) {
- return VFW_E_BADALIGN;
- }
-
- // If the end of the chunk to read is off the end of the stream,
- // truncate it to the end of the stream.
- if ((start + numBytes) > streamLength) {
- numBytes = (uint32_t)(streamLength - start);
- }
- }
-
- BYTE* buf=0;
- aSample->GetPointer(&buf);
-
- return SyncRead(start, numBytes, buf);
-}
-
-STDMETHODIMP
-OutputPin::SyncRead(LONGLONG aPosition,
- LONG aLength,
- BYTE* aBuffer)
-{
- MOZ_ASSERT(!NS_IsMainThread());
- NS_ENSURE_TRUE(aPosition >= 0, E_FAIL);
- NS_ENSURE_TRUE(aLength > 0, E_FAIL);
- NS_ENSURE_TRUE(aBuffer, E_POINTER);
-
- DIRECTSHOW_LOG("OutputPin::SyncRead(%lld, %d)", aPosition, aLength);
- {
- // Ignore reads while flushing.
- CriticalSectionAutoEnter lock(*mLock);
- if (mFlushCount) {
- return S_FALSE;
- }
- }
-
- uint32_t totalBytesRead = 0;
- nsresult rv = mResource.ReadAt(aPosition,
- reinterpret_cast<char*>(aBuffer),
- aLength,
- &totalBytesRead);
- if (NS_FAILED(rv)) {
- return E_FAIL;
- }
- if (totalBytesRead > 0) {
- CriticalSectionAutoEnter lock(*mLock);
- mBytesConsumed += totalBytesRead;
- }
- return (totalBytesRead == aLength) ? S_OK : S_FALSE;
-}
-
-STDMETHODIMP
-OutputPin::Length(LONGLONG* aTotal, LONGLONG* aAvailable)
-{
- HRESULT hr = S_OK;
- int64_t length = mResource.GetLength();
- if (length == -1) {
- hr = VFW_S_ESTIMATED;
- // Don't have a length. Just lie, it seems to work...
- *aTotal = INT32_MAX;
- } else {
- *aTotal = length;
- }
- if (aAvailable) {
- *aAvailable = mResource.GetCachedDataEnd();
- }
-
- DIRECTSHOW_LOG("OutputPin::Length() len=%lld avail=%lld", *aTotal, *aAvailable);
-
- return hr;
-}
-
-STDMETHODIMP
-OutputPin::BeginFlush()
-{
- CriticalSectionAutoEnter lock(*mLock);
- mFlushCount++;
- mSignal.Notify();
- return S_OK;
-}
-
-STDMETHODIMP
-OutputPin::EndFlush(void)
-{
- CriticalSectionAutoEnter lock(*mLock);
- mFlushCount--;
- return S_OK;
-}
-
-uint32_t
-OutputPin::GetAndResetBytesConsumedCount()
-{
- CriticalSectionAutoEnter lock(*mLock);
- uint32_t bytesConsumed = mBytesConsumed;
- mBytesConsumed = 0;
- return bytesConsumed;
-}
-
-SourceFilter::SourceFilter(const GUID& aMajorType,
- const GUID& aSubType)
- : BaseFilter(L"MozillaDirectShowSource", __uuidof(SourceFilter))
-{
- MOZ_COUNT_CTOR(SourceFilter);
- mMediaType.majortype = aMajorType;
- mMediaType.subtype = aSubType;
-
- DIRECTSHOW_LOG("SourceFilter Constructor(%s, %s)",
- GetDirectShowGuidName(aMajorType),
- GetDirectShowGuidName(aSubType));
-}
-
-SourceFilter::~SourceFilter()
-{
- MOZ_COUNT_DTOR(SourceFilter);
- DIRECTSHOW_LOG("SourceFilter Destructor()");
-}
-
-BasePin*
-SourceFilter::GetPin(int n)
-{
- if (n == 0) {
- NS_ASSERTION(mOutputPin != 0, "GetPin with no pin!");
- return static_cast<BasePin*>(mOutputPin);
- } else {
- return nullptr;
- }
-}
-
-// Gets the media type we're supplying.
-const MediaType*
-SourceFilter::GetMediaType() const
-{
- return &mMediaType;
-}
-
-nsresult
-SourceFilter::Init(MediaResource* aResource, int64_t aMP3Offset)
-{
- DIRECTSHOW_LOG("SourceFilter::Init()");
-
- mOutputPin = new OutputPin(aResource,
- this,
- mLock,
- aMP3Offset);
- NS_ENSURE_TRUE(mOutputPin != nullptr, NS_ERROR_FAILURE);
-
- return NS_OK;
-}
-
-uint32_t
-SourceFilter::GetAndResetBytesConsumedCount()
-{
- return mOutputPin->GetAndResetBytesConsumedCount();
-}
-
-
-} // namespace mozilla
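
For context, the removed OutputPin implements DirectShow's IAsyncReader pull model, in which a media sample's start and end times encode byte offsets scaled by 10,000,000 REFERENCE_TIME units per byte; that is why Request() and SyncReadAligned() divide the time stamps by 10000000. A minimal sketch of the convention, with illustrative helper names that are not part of this tree:

#include <cstdint>

// Sketch only: IAsyncReader encodes byte ranges in the sample's time stamps,
// one REFERENCE_TIME "second" (10^7 units) per byte.
static const int64_t kUnitsPerByte = 10000000;

inline void ByteRangeToSampleTimes(int64_t aOffset, int64_t aCount,
                                   int64_t* aStart, int64_t* aEnd)
{
  *aStart = aOffset * kUnitsPerByte;            // what the caller passes to SetTime()
  *aEnd = (aOffset + aCount) * kUnitsPerByte;
}

inline void SampleTimesToByteRange(int64_t aStart, int64_t aEnd,
                                   int64_t* aOffset, int64_t* aCount)
{
  *aOffset = aStart / kUnitsPerByte;            // what Request()/SyncReadAligned() recover
  *aCount = aEnd / kUnitsPerByte - *aOffset;
}
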
diff --git a/dom/media/directshow/SourceFilter.h b/dom/media/directshow/SourceFilter.h
deleted file mode 100644
index d5ce2770e..000000000
--- a/dom/media/directshow/SourceFilter.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this file,
- * You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#if !defined(nsDirectShowSource_h___)
-#define nsDirectShowSource_h___
-
-#include "BaseFilter.h"
-#include "BasePin.h"
-#include "MediaType.h"
-
-#include "nsDeque.h"
-#include "nsAutoPtr.h"
-#include "DirectShowUtils.h"
-#include "mozilla/RefPtr.h"
-
-namespace mozilla {
-
-class MediaResource;
-class OutputPin;
-
-
-// SourceFilter is an asynchronous DirectShow source filter which
-// reads from a MediaResource, and supplies data via a pull model downstream
-// using OutputPin. It is used to supply a generic byte stream into
-// DirectShow.
-//
-// Implements:
-// * IBaseFilter
-// * IMediaFilter
-// * IPersist
-// * IUnknown
-//
-class DECLSPEC_UUID("5c2a7ad0-ba82-4659-9178-c4719a2765d6")
-SourceFilter : public media::BaseFilter
-{
-public:
-
- // Constructs source filter to deliver given media type.
- SourceFilter(const GUID& aMajorType, const GUID& aSubType);
- ~SourceFilter();
-
- nsresult Init(MediaResource *aResource, int64_t aMP3Offset);
-
- // BaseFilter overrides.
- // Only one output - the byte stream.
- int GetPinCount() override { return 1; }
-
- media::BasePin* GetPin(int n) override;
-
- // Gets the media type we're supplying.
- const media::MediaType* GetMediaType() const;
-
- uint32_t GetAndResetBytesConsumedCount();
-
-protected:
-
- // Our async pull output pin.
- nsAutoPtr<OutputPin> mOutputPin;
-
- // Type of byte stream we output.
- media::MediaType mMediaType;
-
-};
-
-} // namespace mozilla
-
-// For mingw __uuidof support
-#ifdef __CRT_UUID_DECL
-__CRT_UUID_DECL(mozilla::SourceFilter, 0x5c2a7ad0,0xba82,0x4659,0x91,0x78,0xc4,0x71,0x9a,0x27,0x65,0xd6);
-#endif
-
-#endif
diff --git a/dom/media/directshow/moz.build b/dom/media/directshow/moz.build
deleted file mode 100644
index 8a9b76200..000000000
--- a/dom/media/directshow/moz.build
+++ /dev/null
@@ -1,41 +0,0 @@
-# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
-# vim: set filetype=python:
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this
-# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-EXPORTS += [
- 'AudioSinkFilter.h',
- 'AudioSinkInputPin.h',
- 'DirectShowDecoder.h',
- 'DirectShowReader.h',
- 'DirectShowUtils.h',
-]
-
-UNIFIED_SOURCES += [
- 'DirectShowDecoder.cpp',
- 'DirectShowUtils.cpp',
- 'SourceFilter.cpp',
-]
-
-SOURCES += [
- 'AudioSinkFilter.cpp',
- 'AudioSinkInputPin.cpp',
- 'DirectShowReader.cpp',
- 'SampleSink.cpp',
-]
-
-# If WebRTC isn't being built, we need to compile the DirectShow base classes so that
-# they're available at link time.
-if not CONFIG['MOZ_WEBRTC']:
- SOURCES += [
- '/media/webrtc/trunk/webrtc/modules/video_capture/windows/BaseFilter.cpp',
- '/media/webrtc/trunk/webrtc/modules/video_capture/windows/BaseInputPin.cpp',
- '/media/webrtc/trunk/webrtc/modules/video_capture/windows/BasePin.cpp',
- '/media/webrtc/trunk/webrtc/modules/video_capture/windows/MediaType.cpp',
- ]
-
-FINAL_LIBRARY = 'xul'
-LOCAL_INCLUDES += [
- '/media/webrtc/trunk/webrtc/modules/video_capture/windows',
-]
diff --git a/dom/media/fmp4/MP4Decoder.cpp b/dom/media/fmp4/MP4Decoder.cpp
index 25dd53f94..6954e9757 100644
--- a/dom/media/fmp4/MP4Decoder.cpp
+++ b/dom/media/fmp4/MP4Decoder.cpp
@@ -83,10 +83,6 @@ MP4Decoder::CanHandleMediaType(const MediaContentType& aType,
const bool isMP4Audio = aType.GetMIMEType().EqualsASCII("audio/mp4") ||
aType.GetMIMEType().EqualsASCII("audio/x-m4a");
const bool isMP4Video =
- // On B2G, treat 3GPP as MP4 when Gonk PDM is available.
-#ifdef MOZ_GONK_MEDIACODEC
- aType.GetMIMEType().EqualsASCII(VIDEO_3GPP) ||
-#endif
aType.GetMIMEType().EqualsASCII("video/mp4") ||
aType.GetMIMEType().EqualsASCII("video/quicktime") ||
aType.GetMIMEType().EqualsASCII("video/x-m4v");
diff --git a/dom/media/fmp4/MP4Demuxer.cpp b/dom/media/fmp4/MP4Demuxer.cpp
index 5a637b003..ef68d5dca 100644
--- a/dom/media/fmp4/MP4Demuxer.cpp
+++ b/dom/media/fmp4/MP4Demuxer.cpp
@@ -16,9 +16,6 @@
#include "mp4_demuxer/Index.h"
#include "nsPrintfCString.h"
-// Used for telemetry
-#include "mozilla/Telemetry.h"
-#include "mp4_demuxer/AnnexB.h"
#include "mp4_demuxer/H264.h"
#include "nsAutoPtr.h"
@@ -72,23 +69,10 @@ private:
// Queued samples extracted by the demuxer, but not yet returned.
RefPtr<MediaRawData> mQueuedSample;
bool mNeedReIndex;
- bool mNeedSPSForTelemetry;
bool mIsH264 = false;
};
-// Returns true if no SPS was found and search for it should continue.
-bool
-AccumulateSPSTelemetry(const MediaByteBuffer* aExtradata)
-{
- // XXX: Do we still need this without telemetry?
- mp4_demuxer::SPSData spsdata;
- if (mp4_demuxer::H264::DecodeSPSFromExtraData(aExtradata, spsdata)) {
- return false;
- }
- return true;
-}
-
MP4Demuxer::MP4Demuxer(MediaResource* aResource)
: mResource(aResource)
, mStream(new mp4_demuxer::ResourceStream(aResource))
@@ -219,25 +203,10 @@ MP4TrackDemuxer::MP4TrackDemuxer(MP4Demuxer* aParent,
EnsureUpToDateIndex(); // Force update of index
VideoInfo* videoInfo = mInfo->GetAsVideoInfo();
- // Collect telemetry from h264 AVCC SPS.
if (videoInfo &&
(mInfo->mMimeType.EqualsLiteral("video/mp4") ||
mInfo->mMimeType.EqualsLiteral("video/avc"))) {
mIsH264 = true;
- RefPtr<MediaByteBuffer> extraData = videoInfo->mExtraData;
- mNeedSPSForTelemetry = AccumulateSPSTelemetry(extraData);
- mp4_demuxer::SPSData spsdata;
- if (mp4_demuxer::H264::DecodeSPSFromExtraData(extraData, spsdata) &&
- spsdata.pic_width > 0 && spsdata.pic_height > 0 &&
- mp4_demuxer::H264::EnsureSPSIsSane(spsdata)) {
- videoInfo->mImage.width = spsdata.pic_width;
- videoInfo->mImage.height = spsdata.pic_height;
- videoInfo->mDisplay.width = spsdata.display_width;
- videoInfo->mDisplay.height = spsdata.display_height;
- }
- } else {
- // No SPS to be found.
- mNeedSPSForTelemetry = false;
}
}
@@ -364,15 +333,6 @@ MP4TrackDemuxer::GetSamples(int32_t aNumSamples)
if (samples->mSamples.IsEmpty()) {
return SamplesPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_END_OF_STREAM, __func__);
} else {
- for (const auto& sample : samples->mSamples) {
- // Collect telemetry from h264 Annex B SPS.
- if (mNeedSPSForTelemetry && mp4_demuxer::AnnexB::HasSPS(sample)) {
- RefPtr<MediaByteBuffer> extradata =
- mp4_demuxer::AnnexB::ExtractExtraData(sample);
- mNeedSPSForTelemetry = AccumulateSPSTelemetry(extradata);
- }
- }
-
if (mNextKeyframeTime.isNothing() ||
samples->mSamples.LastElement()->mTime >= mNextKeyframeTime.value().ToMicroseconds()) {
SetNextKeyFrameTime();
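
Note that the constructor block removed above did more than gather telemetry: it also replaced the container-reported picture and display sizes with SPS-derived values when the SPS parsed and looked sane. A condensed restatement of that fix-up, kept for reference only (it mirrors the deleted lines rather than adding new behaviour; the helper name is illustrative):

#include "mp4_demuxer/H264.h"

// Mirrors the deleted constructor logic: prefer SPS dimensions over the
// values reported by the container when the SPS decodes and passes sanity checks.
static void FixDimensionsFromSPS(VideoInfo* aInfo)
{
  mp4_demuxer::SPSData spsdata;
  if (mp4_demuxer::H264::DecodeSPSFromExtraData(aInfo->mExtraData, spsdata) &&
      spsdata.pic_width > 0 && spsdata.pic_height > 0 &&
      mp4_demuxer::H264::EnsureSPSIsSane(spsdata)) {
    aInfo->mImage.width = spsdata.pic_width;
    aInfo->mImage.height = spsdata.pic_height;
    aInfo->mDisplay.width = spsdata.display_width;
    aInfo->mDisplay.height = spsdata.display_height;
  }
}
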
diff --git a/dom/media/fmp4/MP4Stream.cpp b/dom/media/fmp4/MP4Stream.cpp
index 615a7dc01..9a79cac7a 100644
--- a/dom/media/fmp4/MP4Stream.cpp
+++ b/dom/media/fmp4/MP4Stream.cpp
@@ -48,9 +48,6 @@ MP4Stream::BlockingReadIntoCache(int64_t aOffset, size_t aCount, Monitor* aToUnl
return true;
}
-// We surreptitiously reimplement the supposedly-blocking ReadAt as a non-
-// blocking CachedReadAt, and record when it fails. This allows MP4Reader
-// to retry the read as an actual blocking read without holding the lock.
bool
MP4Stream::ReadAt(int64_t aOffset, void* aBuffer, size_t aCount,
size_t* aBytesRead)
diff --git a/dom/media/fmp4/moz.build b/dom/media/fmp4/moz.build
index 6a249ae3e..a79fb0229 100644
--- a/dom/media/fmp4/moz.build
+++ b/dom/media/fmp4/moz.build
@@ -20,6 +20,3 @@ SOURCES += [
]
FINAL_LIBRARY = 'xul'
-
-if CONFIG['MOZ_GONK_MEDIACODEC']:
- DEFINES['MOZ_GONK_MEDIACODEC'] = True
diff --git a/dom/media/gtest/Cargo.toml b/dom/media/gtest/Cargo.toml
deleted file mode 100644
index a55f8fb68..000000000
--- a/dom/media/gtest/Cargo.toml
+++ /dev/null
@@ -1,7 +0,0 @@
-[package]
-name = "mp4parse-gtest"
-version = "0.1.0"
-authors = ["nobody@mozilla.org"]
-
-[lib]
-path = "hello.rs"
diff --git a/dom/media/gtest/TestMP3Demuxer.cpp b/dom/media/gtest/TestMP3Demuxer.cpp
index 8d2109f00..934acb60e 100644
--- a/dom/media/gtest/TestMP3Demuxer.cpp
+++ b/dom/media/gtest/TestMP3Demuxer.cpp
@@ -11,7 +11,6 @@
#include "MockMediaResource.h"
using namespace mozilla;
-using namespace mozilla::mp3;
using media::TimeUnit;
diff --git a/dom/media/gtest/TestMP4Reader.cpp b/dom/media/gtest/TestMP4Reader.cpp
deleted file mode 100644
index f08f7a40d..000000000
--- a/dom/media/gtest/TestMP4Reader.cpp
+++ /dev/null
@@ -1,217 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include "gtest/gtest.h"
-#include "MP4Reader.h"
-#include "MP4Decoder.h"
-#include "mozilla/SharedThreadPool.h"
-#include "MockMediaResource.h"
-#include "MockMediaDecoderOwner.h"
-#include "mozilla/Preferences.h"
-#include "TimeUnits.h"
-
-using namespace mozilla;
-using namespace mozilla::dom;
-
-class TestBinding
-{
-public:
- NS_INLINE_DECL_THREADSAFE_REFCOUNTING(TestBinding);
-
- RefPtr<MP4Decoder> decoder;
- RefPtr<MockMediaResource> resource;
- RefPtr<MP4Reader> reader;
-
- explicit TestBinding(const char* aFileName = "gizmo.mp4")
- : decoder(new MP4Decoder())
- , resource(new MockMediaResource(aFileName))
- , reader(new MP4Reader(decoder))
- {
- EXPECT_EQ(NS_OK, Preferences::SetBool(
- "media.use-blank-decoder", true));
-
- EXPECT_EQ(NS_OK, resource->Open(nullptr));
- decoder->SetResource(resource);
-
- reader->Init(nullptr);
- // This needs to be done before invoking GetBuffered. This is normally
- // done by MediaDecoderStateMachine.
- reader->DispatchSetStartTime(0);
- }
-
- void Init() {
- nsCOMPtr<nsIThread> thread;
- nsCOMPtr<nsIRunnable> r = NewRunnableMethod(this, &TestBinding::ReadMetadata);
- nsresult rv = NS_NewThread(getter_AddRefs(thread), r);
- EXPECT_EQ(NS_OK, rv);
- thread->Shutdown();
- }
-
-private:
- virtual ~TestBinding()
- {
- {
- RefPtr<TaskQueue> queue = reader->OwnerThread();
- nsCOMPtr<nsIRunnable> task = NewRunnableMethod(reader, &MP4Reader::Shutdown);
- // Hackily bypass the tail dispatcher so that we can AwaitShutdownAndIdle.
- // In production code we'd use BeginShutdown + promises.
- queue->Dispatch(task.forget(), AbstractThread::AssertDispatchSuccess,
- AbstractThread::TailDispatch);
- queue->AwaitShutdownAndIdle();
- }
- decoder = nullptr;
- resource = nullptr;
- reader = nullptr;
- SharedThreadPool::SpinUntilEmpty();
- }
-
- void ReadMetadata()
- {
- MediaInfo info;
- MetadataTags* tags;
- EXPECT_EQ(NS_OK, reader->ReadMetadata(&info, &tags));
- }
-};
-
-TEST(MP4Reader, BufferedRange)
-{
- RefPtr<TestBinding> b = new TestBinding();
- b->Init();
-
- // Video 3-4 sec, audio 2.986666-4.010666 sec
- b->resource->MockAddBufferedRange(248400, 327455);
-
- media::TimeIntervals ranges = b->reader->GetBuffered();
- EXPECT_EQ(1U, ranges.Length());
- EXPECT_NEAR(270000 / 90000.0, ranges.Start(0).ToSeconds(), 0.000001);
- EXPECT_NEAR(360000 / 90000.0, ranges.End(0).ToSeconds(), 0.000001);
-}
-
-TEST(MP4Reader, BufferedRangeMissingLastByte)
-{
- RefPtr<TestBinding> b = new TestBinding();
- b->Init();
-
- // Dropping the last byte of the video
- b->resource->MockClearBufferedRanges();
- b->resource->MockAddBufferedRange(248400, 324912);
- b->resource->MockAddBufferedRange(324913, 327455);
-
- media::TimeIntervals ranges = b->reader->GetBuffered();
- EXPECT_EQ(1U, ranges.Length());
- EXPECT_NEAR(270000.0 / 90000.0, ranges.Start(0).ToSeconds(), 0.000001);
- EXPECT_NEAR(357000 / 90000.0, ranges.End(0).ToSeconds(), 0.000001);
-}
-
-TEST(MP4Reader, BufferedRangeSyncFrame)
-{
- RefPtr<TestBinding> b = new TestBinding();
- b->Init();
-
- // Check that missing the first byte at 2 seconds skips right through to 3
- // seconds because of a missing sync frame
- b->resource->MockClearBufferedRanges();
- b->resource->MockAddBufferedRange(146336, 327455);
-
- media::TimeIntervals ranges = b->reader->GetBuffered();
- EXPECT_EQ(1U, ranges.Length());
- EXPECT_NEAR(270000.0 / 90000.0, ranges.Start(0).ToSeconds(), 0.000001);
- EXPECT_NEAR(360000 / 90000.0, ranges.End(0).ToSeconds(), 0.000001);
-}
-
-TEST(MP4Reader, CompositionOrder)
-{
- RefPtr<TestBinding> b = new TestBinding("mediasource_test.mp4");
- b->Init();
-
- // The first 5 video samples of this file are:
- // Video timescale=2500
- // Frame Start Size Time Duration Sync
- // 1 48 5455 166 83 Yes
- // 2 5503 145 249 83
- // 3 6228 575 581 83
- // 4 7383 235 415 83
- // 5 8779 183 332 83
- // 6 9543 191 498 83
- //
- // Audio timescale=44100
- // 1 5648 580 0 1024 Yes
- // 2 6803 580 1024 1058 Yes
- // 3 7618 581 2082 1014 Yes
- // 4 8199 580 3096 1015 Yes
- // 5 8962 581 4111 1014 Yes
- // 6 9734 580 5125 1014 Yes
- // 7 10314 581 6139 1059 Yes
- // 8 11207 580 7198 1014 Yes
- // 9 12035 581 8212 1014 Yes
- // 10 12616 580 9226 1015 Yes
- // 11 13220 581 10241 1014 Yes
-
- b->resource->MockClearBufferedRanges();
- // First two frames in decoding + first audio frame
- b->resource->MockAddBufferedRange(48, 5503); // Video 1
- b->resource->MockAddBufferedRange(5503, 5648); // Video 2
- b->resource->MockAddBufferedRange(6228, 6803); // Video 3
-
- // Audio - 5 frames; 0 - 139206 us
- b->resource->MockAddBufferedRange(5648, 6228);
- b->resource->MockAddBufferedRange(6803, 7383);
- b->resource->MockAddBufferedRange(7618, 8199);
- b->resource->MockAddBufferedRange(8199, 8779);
- b->resource->MockAddBufferedRange(8962, 9563);
- b->resource->MockAddBufferedRange(9734, 10314);
- b->resource->MockAddBufferedRange(10314, 10895);
- b->resource->MockAddBufferedRange(11207, 11787);
- b->resource->MockAddBufferedRange(12035, 12616);
- b->resource->MockAddBufferedRange(12616, 13196);
- b->resource->MockAddBufferedRange(13220, 13901);
-
- media::TimeIntervals ranges = b->reader->GetBuffered();
- EXPECT_EQ(2U, ranges.Length());
-
- EXPECT_NEAR(166.0 / 2500.0, ranges.Start(0).ToSeconds(), 0.000001);
- EXPECT_NEAR(332.0 / 2500.0, ranges.End(0).ToSeconds(), 0.000001);
-
- EXPECT_NEAR(581.0 / 2500.0, ranges.Start(1).ToSeconds(), 0.000001);
- EXPECT_NEAR(11255.0 / 44100.0, ranges.End(1).ToSeconds(), 0.000001);
-}
-
-TEST(MP4Reader, Normalised)
-{
- RefPtr<TestBinding> b = new TestBinding("mediasource_test.mp4");
- b->Init();
-
- // The first 5 video samples of this file are:
- // Video timescale=2500
- // Frame Start Size Time Duration Sync
- // 1 48 5455 166 83 Yes
- // 2 5503 145 249 83
- // 3 6228 575 581 83
- // 4 7383 235 415 83
- // 5 8779 183 332 83
- // 6 9543 191 498 83
- //
- // Audio timescale=44100
- // 1 5648 580 0 1024 Yes
- // 2 6803 580 1024 1058 Yes
- // 3 7618 581 2082 1014 Yes
- // 4 8199 580 3096 1015 Yes
- // 5 8962 581 4111 1014 Yes
- // 6 9734 580 5125 1014 Yes
- // 7 10314 581 6139 1059 Yes
- // 8 11207 580 7198 1014 Yes
- // 9 12035 581 8212 1014 Yes
- // 10 12616 580 9226 1015 Yes
- // 11 13220 581 10241 1014 Yes
-
- b->resource->MockClearBufferedRanges();
- b->resource->MockAddBufferedRange(48, 13901);
-
- media::TimeIntervals ranges = b->reader->GetBuffered();
- EXPECT_EQ(1U, ranges.Length());
-
- EXPECT_NEAR(166.0 / 2500.0, ranges.Start(0).ToSeconds(), 0.000001);
- EXPECT_NEAR(11255.0 / 44100.0, ranges.End(0).ToSeconds(), 0.000001);
-}
diff --git a/dom/media/gtest/TestRust.cpp b/dom/media/gtest/TestRust.cpp
deleted file mode 100644
index 86d0e99b8..000000000
--- a/dom/media/gtest/TestRust.cpp
+++ /dev/null
@@ -1,9 +0,0 @@
-#include <stdint.h>
-#include "gtest/gtest.h"
-
-extern "C" uint8_t* test_rust();
-
-TEST(rust, CallFromCpp) {
- auto greeting = test_rust();
- EXPECT_STREQ(reinterpret_cast<char*>(greeting), "hello from rust.");
-}
diff --git a/dom/media/gtest/hello.rs b/dom/media/gtest/hello.rs
deleted file mode 100644
index cd111882a..000000000
--- a/dom/media/gtest/hello.rs
+++ /dev/null
@@ -1,6 +0,0 @@
-#[no_mangle]
-pub extern fn test_rust() -> *const u8 {
- // NB: rust &str aren't null terminated.
- let greeting = "hello from rust.\0";
- greeting.as_ptr()
-}
diff --git a/dom/media/gtest/moz.build b/dom/media/gtest/moz.build
index d5d02bced..a7ea73807 100644
--- a/dom/media/gtest/moz.build
+++ b/dom/media/gtest/moz.build
@@ -21,7 +21,6 @@ UNIFIED_SOURCES += [
'TestMozPromise.cpp',
'TestMP3Demuxer.cpp',
'TestMP4Demuxer.cpp',
- # 'TestMP4Reader.cpp', disabled so we can turn check tests back on (bug 1175752)
'TestTrackEncoder.cpp',
'TestVideoSegment.cpp',
'TestVideoUtils.cpp',
diff --git a/dom/media/mediasource/moz.build b/dom/media/mediasource/moz.build
index 6ded1875d..a1689c216 100644
--- a/dom/media/mediasource/moz.build
+++ b/dom/media/mediasource/moz.build
@@ -38,9 +38,6 @@ TEST_DIRS += [
'gtest',
]
-if CONFIG['MOZ_GONK_MEDIACODEC']:
- DEFINES['MOZ_GONK_MEDIACODEC'] = True
-
include('/ipc/chromium/chromium-config.mozbuild')
FINAL_LIBRARY = 'xul'
diff --git a/dom/media/moz.build b/dom/media/moz.build
index 4d036a5f6..df8cb619d 100644
--- a/dom/media/moz.build
+++ b/dom/media/moz.build
@@ -30,6 +30,7 @@ DIRS += [
'ipc',
'mediasink',
'mediasource',
+ 'mp3',
'ogg',
'platforms',
'systemservices',
@@ -42,12 +43,6 @@ DIRS += [
'standalone',
]
-if CONFIG['MOZ_DIRECTSHOW']:
- DIRS += ['directshow']
-
-if CONFIG['MOZ_ANDROID_OMX']:
- DIRS += ['android']
-
if CONFIG['MOZ_FMP4']:
DIRS += ['fmp4']
@@ -128,9 +123,6 @@ EXPORTS += [
'MediaTimer.h',
'MediaTrack.h',
'MediaTrackList.h',
- 'MP3Decoder.h',
- 'MP3Demuxer.h',
- 'MP3FrameParser.h',
'NextFrameSeekTask.h',
'nsIDocumentActivity.h',
'PrincipalChangeObserver.h',
@@ -237,9 +229,6 @@ UNIFIED_SOURCES += [
'MediaTimer.cpp',
'MediaTrack.cpp',
'MediaTrackList.cpp',
- 'MP3Decoder.cpp',
- 'MP3Demuxer.cpp',
- 'MP3FrameParser.cpp',
'NextFrameSeekTask.cpp',
'QueueObject.cpp',
'SeekJob.cpp',
@@ -294,11 +283,6 @@ LOCAL_INCLUDES += [
'/netwerk/base',
]
-if CONFIG['MOZ_DIRECTSHOW']:
- LOCAL_INCLUDES += [
- '/media/webrtc/trunk/webrtc/modules/video_capture/windows',
- ]
-
if CONFIG['MOZ_WEBRTC']:
LOCAL_INCLUDES += [
'/media/webrtc/signaling/src/common',
diff --git a/dom/media/MP3Decoder.cpp b/dom/media/mp3/MP3Decoder.cpp
index b71111e79..074a0866d 100644
--- a/dom/media/MP3Decoder.cpp
+++ b/dom/media/mp3/MP3Decoder.cpp
@@ -24,7 +24,7 @@ MP3Decoder::Clone(MediaDecoderOwner* aOwner) {
MediaDecoderStateMachine*
MP3Decoder::CreateStateMachine() {
RefPtr<MediaDecoderReader> reader =
- new MediaFormatReader(this, new mp3::MP3Demuxer(GetResource()));
+ new MediaFormatReader(this, new MP3Demuxer(GetResource()));
return new MediaDecoderStateMachine(this, reader);
}
diff --git a/dom/media/MP3Decoder.h b/dom/media/mp3/MP3Decoder.h
index 887251065..887251065 100644
--- a/dom/media/MP3Decoder.h
+++ b/dom/media/mp3/MP3Decoder.h
diff --git a/dom/media/MP3Demuxer.cpp b/dom/media/mp3/MP3Demuxer.cpp
index 7d478a41b..5a98cabfe 100644
--- a/dom/media/MP3Demuxer.cpp
+++ b/dom/media/mp3/MP3Demuxer.cpp
@@ -33,7 +33,6 @@ using mozilla::media::TimeIntervals;
using mp4_demuxer::ByteReader;
namespace mozilla {
-namespace mp3 {
// MP3Demuxer
@@ -1338,5 +1337,4 @@ ID3Parser::ID3Header::Update(uint8_t c) {
return IsValid(mPos++);
}
-} // namespace mp3
} // namespace mozilla
diff --git a/dom/media/MP3Demuxer.h b/dom/media/mp3/MP3Demuxer.h
index 03e67b0d9..5331c4d54 100644
--- a/dom/media/MP3Demuxer.h
+++ b/dom/media/mp3/MP3Demuxer.h
@@ -13,7 +13,6 @@
#include <vector>
namespace mozilla {
-namespace mp3 {
class MP3TrackDemuxer;
@@ -468,7 +467,6 @@ private:
UniquePtr<AudioInfo> mInfo;
};
-} // namespace mp3
} // namespace mozilla
#endif
diff --git a/dom/media/mp3/moz.build b/dom/media/mp3/moz.build
new file mode 100644
index 000000000..596d061f8
--- /dev/null
+++ b/dom/media/mp3/moz.build
@@ -0,0 +1,17 @@
+# -*- Mode: python; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+EXPORTS += [
+ 'MP3Decoder.h',
+ 'MP3Demuxer.h',
+]
+
+UNIFIED_SOURCES += [
+ 'MP3Decoder.cpp',
+ 'MP3Demuxer.cpp',
+]
+
+FINAL_LIBRARY = 'xul'
diff --git a/dom/media/platforms/MediaTelemetryConstants.h b/dom/media/platforms/MediaTelemetryConstants.h
deleted file mode 100644
index 5024949a8..000000000
--- a/dom/media/platforms/MediaTelemetryConstants.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* -*- Mode: IDL; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-
- *
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-#ifndef dom_media_platforms_MediaTelemetryConstants_h___
-#define dom_media_platforms_MediaTelemetryConstants_h___
-
-namespace mozilla {
-namespace media {
-
-enum class MediaDecoderBackend : uint32_t
-{
- WMFSoftware = 0,
- WMFDXVA2D3D9 = 1,
- WMFDXVA2D3D11 = 2
-};
-
-} // namespace media
-} // namespace mozilla
-
-#endif // dom_media_platforms_MediaTelemetryConstants_h___
diff --git a/dom/media/platforms/PDMFactory.cpp b/dom/media/platforms/PDMFactory.cpp
index c1e58fdc2..5bfdcffb7 100644
--- a/dom/media/platforms/PDMFactory.cpp
+++ b/dom/media/platforms/PDMFactory.cpp
@@ -19,9 +19,6 @@
#ifdef MOZ_APPLEMEDIA
#include "AppleDecoderModule.h"
#endif
-#ifdef MOZ_GONK_MEDIACODEC
-#include "GonkDecoderModule.h"
-#endif
#ifdef MOZ_WIDGET_ANDROID
#include "AndroidDecoderModule.h"
#endif
@@ -390,12 +387,6 @@ PDMFactory::CreatePDMs()
m = new AppleDecoderModule();
StartupPDM(m);
#endif
-#ifdef MOZ_GONK_MEDIACODEC
- if (MediaPrefs::PDMGonkDecoderEnabled()) {
- m = new GonkDecoderModule();
- StartupPDM(m);
- }
-#endif
#ifdef MOZ_WIDGET_ANDROID
if(MediaPrefs::PDMAndroidMediaCodecEnabled()){
m = new AndroidDecoderModule();
diff --git a/dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp b/dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp
index e1c326818..426e9f74b 100644
--- a/dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp
@@ -145,6 +145,8 @@ FFmpegLibWrapper::Link()
AV_FUNC(avcodec_alloc_frame, (AV_FUNC_53 | AV_FUNC_54))
AV_FUNC(avcodec_get_frame_defaults, (AV_FUNC_53 | AV_FUNC_54))
AV_FUNC(avcodec_free_frame, AV_FUNC_54)
+ AV_FUNC(avcodec_send_packet, AV_FUNC_58)
+ AV_FUNC(avcodec_receive_frame, AV_FUNC_58)
AV_FUNC(av_log_set_level, AV_FUNC_AVUTIL_ALL)
AV_FUNC(av_malloc, AV_FUNC_AVUTIL_ALL)
AV_FUNC(av_freep, AV_FUNC_AVUTIL_ALL)
diff --git a/dom/media/platforms/ffmpeg/FFmpegLibWrapper.h b/dom/media/platforms/ffmpeg/FFmpegLibWrapper.h
index c6c43a4ae..b968edd32 100644
--- a/dom/media/platforms/ffmpeg/FFmpegLibWrapper.h
+++ b/dom/media/platforms/ffmpeg/FFmpegLibWrapper.h
@@ -71,6 +71,10 @@ struct FFmpegLibWrapper
// libavcodec v54 only
void (*avcodec_free_frame)(AVFrame** frame);
+ // libavcodec v58 and later only
+ int (*avcodec_send_packet)(AVCodecContext* avctx, const AVPacket* avpkt);
+ int (*avcodec_receive_frame)(AVCodecContext* avctx, AVFrame* frame);
+
// libavutil
void (*av_log_set_level)(int level);
void* (*av_malloc)(size_t size);
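
The two new members are wired up through the AV_FUNC(..., AV_FUNC_58) entries added to FFmpegLibWrapper.cpp above, so on older libavcodec builds they are expected to stay null. A rough sketch of that optional-symbol pattern, assuming a plain dlsym() loader purely for illustration (the real wrapper uses its own AV_FUNC machinery):

#include <dlfcn.h>

struct AVCodecContext;
struct AVPacket;
struct AVFrame;

// Illustrative optional-symbol binding; Bind() returns false when the loaded
// library predates the send/receive API (libavcodec < 58).
struct SendReceiveFns {
  int (*send_packet)(AVCodecContext*, const AVPacket*) = nullptr;
  int (*receive_frame)(AVCodecContext*, AVFrame*) = nullptr;

  bool Bind(void* aAvcodecHandle) {
    send_packet = reinterpret_cast<decltype(send_packet)>(
        dlsym(aAvcodecHandle, "avcodec_send_packet"));
    receive_frame = reinterpret_cast<decltype(receive_frame)>(
        dlsym(aAvcodecHandle, "avcodec_receive_frame"));
    return send_packet && receive_frame;
  }
};
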
diff --git a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
index aec1e9136..f3101e44c 100644
--- a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
@@ -174,12 +174,15 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample, bool* aGotFrame)
uint8_t* inputData = const_cast<uint8_t*>(aSample->Data());
size_t inputSize = aSample->Size();
-#if LIBAVCODEC_VERSION_MAJOR >= 54
+#if LIBAVCODEC_VERSION_MAJOR >= 54 && LIBAVCODEC_VERSION_MAJOR < 58
if (inputSize && mCodecParser && (mCodecID == AV_CODEC_ID_VP8
-#if LIBAVCODEC_VERSION_MAJOR >= 55
+#if LIBAVCODEC_VERSION_MAJOR >= 55 && LIBAVCODEC_VERSION_MAJOR < 58
|| mCodecID == AV_CODEC_ID_VP9
#endif
- )) {
+ ))
+#endif
+#if LIBAVCODEC_VERSION_MAJOR >= 54
+ {
while (inputSize) {
uint8_t* data = inputData;
int size = inputSize;
@@ -224,6 +227,48 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample,
packet.flags = aSample->mKeyframe ? AV_PKT_FLAG_KEY : 0;
packet.pos = aSample->mOffset;
+#if LIBAVCODEC_VERSION_MAJOR >= 58
+ packet.duration = aSample->mDuration;
+ int res = mLib->avcodec_send_packet(mCodecContext, &packet);
+ if (res < 0) {
+ // In theory, avcodec_send_packet could return AVERROR(EAGAIN) should its
+ // internal buffers be full. In practice this can't happen, as we only feed
+ // one frame at a time and immediately call avcodec_receive_frame right after.
+ FFMPEG_LOG("avcodec_send_packet error: %d", res);
+ return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("avcodec_send_packet error: %d", res));
+ }
+
+ if (aGotFrame) {
+ *aGotFrame = false;
+ }
+ do {
+ if (!PrepareFrame()) {
+ NS_WARNING("FFmpeg h264 decoder failed to allocate frame.");
+ return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
+ }
+ res = mLib->avcodec_receive_frame(mCodecContext, mFrame);
+ if (res == int(AVERROR_EOF)) {
+ return NS_ERROR_DOM_MEDIA_END_OF_STREAM;
+ }
+ if (res == AVERROR(EAGAIN)) {
+ return NS_OK;
+ }
+ if (res < 0) {
+ FFMPEG_LOG("avcodec_receive_frame error: %d", res);
+ return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("avcodec_receive_frame error: %d", res));
+ }
+ MediaResult rv = CreateImage(mFrame->pkt_pos, mFrame->pkt_pts,
+ mFrame->pkt_duration);
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+ if (aGotFrame) {
+ *aGotFrame = true;
+ }
+ } while (true);
+#else
// LibAV provides no API to retrieve the decoded sample's duration.
// (FFmpeg >= 1.0 provides av_frame_get_pkt_duration)
// As such we instead use a map using the dts as key that we will retrieve
@@ -276,8 +321,21 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample,
// against the map becoming extremely big.
mDurationMap.Clear();
}
+
+ MediaResult rv = CreateImage(aSample->mOffset, pts, duration);
+ if (NS_SUCCEEDED(rv) && aGotFrame) {
+ *aGotFrame = true;
+ }
+ return rv;
+#endif
+}
+
+MediaResult
+FFmpegVideoDecoder<LIBAV_VER>::CreateImage(int64_t aOffset, int64_t aPts,
+ int64_t aDuration)
+{
FFMPEG_LOG("Got one frame output with pts=%lld dts=%lld duration=%lld opaque=%lld",
- pts, mFrame->pkt_dts, duration, mCodecContext->reordered_opaque);
+ aPts, mFrame->pkt_dts, aDuration, mCodecContext->reordered_opaque);
VideoData::YCbCrBuffer b;
b.mPlanes[0].mData = mFrame->data[0];
@@ -317,9 +375,9 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample,
RefPtr<VideoData> v =
VideoData::CreateAndCopyData(mInfo,
mImageContainer,
- aSample->mOffset,
- pts,
- duration,
+ aOffset,
+ aPts,
+ aDuration,
b,
!!mFrame->key_frame,
-1,
@@ -331,9 +389,6 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample,
RESULT_DETAIL("image allocation error"));
}
mCallback->Output(v);
- if (aGotFrame) {
- *aGotFrame = true;
- }
return NS_OK;
}
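
For readers following the new path added above: with libavcodec 58 a packet is submitted once via avcodec_send_packet() and decoded frames are then drained with avcodec_receive_frame() until it reports AVERROR(EAGAIN) (more input needed) or AVERROR_EOF. A self-contained sketch of that loop, with a plain function pointer standing in for mCallback->Output() (illustrative helper, not code from this patch):

extern "C" {
#include <libavcodec/avcodec.h>
}

// Condensed FFmpeg >= 58 decode loop; error handling is simplified to
// returning the raw libav error code.
static int DecodeOnePacket(AVCodecContext* aCtx, const AVPacket* aPkt,
                           AVFrame* aFrame, void (*aOutput)(const AVFrame*))
{
  int res = avcodec_send_packet(aCtx, aPkt);
  if (res < 0) {
    // With a single packet in flight, AVERROR(EAGAIN) is not expected here.
    return res;
  }
  while (true) {
    res = avcodec_receive_frame(aCtx, aFrame);
    if (res == AVERROR(EAGAIN)) {
      return 0;               // decoder needs more input; not an error
    }
    if (res == AVERROR_EOF || res < 0) {
      return res;             // drained (EOF) or a genuine decode error
    }
    aOutput(aFrame);          // hand the decoded frame downstream
    av_frame_unref(aFrame);   // release refs before reusing the frame
  }
}
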
diff --git a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h
index 786df0da1..49a55e8a6 100644
--- a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h
+++ b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h
@@ -49,6 +49,7 @@ private:
MediaResult DoDecode(MediaRawData* aSample) override;
MediaResult DoDecode(MediaRawData* aSample, bool* aGotFrame);
MediaResult DoDecode(MediaRawData* aSample, uint8_t* aData, int aSize, bool* aGotFrame);
+ MediaResult CreateImage(int64_t aOffset, int64_t aPts, int64_t aDuration);
void ProcessDrain() override;
void ProcessFlush() override;
void OutputDelayedFrames();
diff --git a/dom/media/platforms/moz.build b/dom/media/platforms/moz.build
index be13d31c4..f5fb72c5d 100644
--- a/dom/media/platforms/moz.build
+++ b/dom/media/platforms/moz.build
@@ -10,7 +10,6 @@ EXPORTS += [
'agnostic/TheoraDecoder.h',
'agnostic/VorbisDecoder.h',
'agnostic/VPXDecoder.h',
- 'MediaTelemetryConstants.h',
'PDMFactory.h',
'PlatformDecoderModule.h',
'wrappers/FuzzingWrapper.h',
diff --git a/dom/media/platforms/omx/OmxPlatformLayer.cpp b/dom/media/platforms/omx/OmxPlatformLayer.cpp
index 039b4a22f..15b3062a4 100644
--- a/dom/media/platforms/omx/OmxPlatformLayer.cpp
+++ b/dom/media/platforms/omx/OmxPlatformLayer.cpp
@@ -282,26 +282,7 @@ OmxPlatformLayer::CompressionFormat()
}
}
-// Implementations for different platforms will be defined in their own files.
-#ifdef OMX_PLATFORM_GONK
-
-bool
-OmxPlatformLayer::SupportsMimeType(const nsACString& aMimeType)
-{
- return GonkOmxPlatformLayer::FindComponents(aMimeType);
-}
-
-OmxPlatformLayer*
-OmxPlatformLayer::Create(OmxDataDecoder* aDataDecoder,
- OmxPromiseLayer* aPromiseLayer,
- TaskQueue* aTaskQueue,
- layers::ImageContainer* aImageContainer)
-{
- return new GonkOmxPlatformLayer(aDataDecoder, aPromiseLayer, aTaskQueue, aImageContainer);
-}
-
-#else // For platforms without OMX IL support.
-
+// For platforms without OMX IL support.
bool
OmxPlatformLayer::SupportsMimeType(const nsACString& aMimeType)
{
@@ -317,6 +298,4 @@ OmxPlatformLayer::Create(OmxDataDecoder* aDataDecoder,
return nullptr;
}
-#endif
-
}
diff --git a/dom/media/platforms/wmf/DXVA2Manager.cpp b/dom/media/platforms/wmf/DXVA2Manager.cpp
index 1226ea621..69e002f7f 100644
--- a/dom/media/platforms/wmf/DXVA2Manager.cpp
+++ b/dom/media/platforms/wmf/DXVA2Manager.cpp
@@ -14,7 +14,6 @@
#include "mozilla/layers/D3D11ShareHandleImage.h"
#include "mozilla/layers/ImageBridgeChild.h"
#include "mozilla/layers/TextureForwarder.h"
-#include "MediaTelemetryConstants.h"
#include "mfapi.h"
#include "gfxPrefs.h"
#include "MFTDecoder.h"
diff --git a/dom/media/platforms/wmf/WMFMediaDataDecoder.cpp b/dom/media/platforms/wmf/WMFMediaDataDecoder.cpp
index 15e2e1097..e6dd29c6d 100644
--- a/dom/media/platforms/wmf/WMFMediaDataDecoder.cpp
+++ b/dom/media/platforms/wmf/WMFMediaDataDecoder.cpp
@@ -38,39 +38,6 @@ WMFMediaDataDecoder::Init()
return InitPromise::CreateAndResolve(mMFTManager->GetType(), __func__);
}
-// A single telemetry sample is reported for each MediaDataDecoder object
-// that has detected error or produced output successfully.
-static void
-SendTelemetry(unsigned long hr)
-{
- // Collapse the error codes into a range of 0-0xff that can be viewed in
- // telemetry histograms. For most MF_E_* errors, unique samples are used,
- // retaining the least significant 7 or 8 bits. Other error codes are
- // bucketed.
- uint32_t sample;
- if (SUCCEEDED(hr)) {
- sample = 0;
- } else if (hr < 0xc00d36b0) {
- sample = 1; // low bucket
- } else if (hr < 0xc00d3700) {
- sample = hr & 0xffU; // MF_E_*
- } else if (hr <= 0xc00d3705) {
- sample = 0x80 + (hr & 0xfU); // more MF_E_*
- } else if (hr < 0xc00d6d60) {
- sample = 2; // mid bucket
- } else if (hr <= 0xc00d6d78) {
- sample = hr & 0xffU; // MF_E_TRANSFORM_*
- } else {
- sample = 3; // high bucket
- }
-
- nsCOMPtr<nsIRunnable> runnable = NS_NewRunnableFunction(
- [sample] {
- /* Telemetry STUB */
- });
- NS_DispatchToMainThread(runnable);
-}
-
void
WMFMediaDataDecoder::Shutdown()
{
@@ -90,9 +57,6 @@ WMFMediaDataDecoder::ProcessShutdown()
if (mMFTManager) {
mMFTManager->Shutdown();
mMFTManager = nullptr;
- if (!mRecordedError && mHasSuccessfulOutput) {
- SendTelemetry(S_OK);
- }
}
}
@@ -124,10 +88,6 @@ WMFMediaDataDecoder::ProcessDecode(MediaRawData* aSample)
NS_WARNING("MFTManager rejected sample");
mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
RESULT_DETAIL("MFTManager::Input:%x", hr)));
- if (!mRecordedError) {
- SendTelemetry(hr);
- mRecordedError = true;
- }
return;
}
@@ -143,7 +103,6 @@ WMFMediaDataDecoder::ProcessOutput()
HRESULT hr = S_OK;
while (SUCCEEDED(hr = mMFTManager->Output(mLastStreamOffset, output)) &&
output) {
- mHasSuccessfulOutput = true;
mCallback->Output(output);
}
if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
@@ -152,10 +111,6 @@ WMFMediaDataDecoder::ProcessOutput()
NS_WARNING("WMFMediaDataDecoder failed to output data");
mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
RESULT_DETAIL("MFTManager::Output:%x", hr)));
- if (!mRecordedError) {
- SendTelemetry(hr);
- mRecordedError = true;
- }
}
}
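
Since the removed SendTelemetry() was also the only place documenting how WMF HRESULTs were collapsed into a 0-0xff histogram, a standalone restatement of that bucketing follows, with two worked values checked at compile time (the helper name is illustrative and the code is for reference only):

#include <cstdint>

// Mirrors the deleted bucketing: collapse HRESULTs into the range 0-0xff.
constexpr uint32_t BucketWmfHr(unsigned long hr)
{
  return (hr & 0x80000000UL) == 0 ? 0u            // SUCCEEDED(hr)
       : hr <  0xc00d36b0UL ? 1u                  // low bucket
       : hr <  0xc00d3700UL ? (hr & 0xffU)        // MF_E_*: keep low byte
       : hr <= 0xc00d3705UL ? 0x80u + (hr & 0xfU) // more MF_E_*
       : hr <  0xc00d6d60UL ? 2u                  // mid bucket
       : hr <= 0xc00d6d78UL ? (hr & 0xffU)        // MF_E_TRANSFORM_*
       : 3u;                                      // high bucket
}

static_assert(BucketWmfHr(0xc00d36b2UL) == 0xb2, "MF_E_* keeps its low byte");
static_assert(BucketWmfHr(0xc00d3704UL) == 0x84, "0x80 + low nibble");
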
diff --git a/dom/media/platforms/wmf/WMFMediaDataDecoder.h b/dom/media/platforms/wmf/WMFMediaDataDecoder.h
index a4dd49f56..f869012e7 100644
--- a/dom/media/platforms/wmf/WMFMediaDataDecoder.h
+++ b/dom/media/platforms/wmf/WMFMediaDataDecoder.h
@@ -33,7 +33,7 @@ public:
// Returns S_OK on success, or MF_E_TRANSFORM_NEED_MORE_INPUT if there's not
// enough data to produce more output. If this returns a failure code other
// than MF_E_TRANSFORM_NEED_MORE_INPUT, an error will be reported to the
- // MP4Reader.
+ // MP4Demuxer.
virtual HRESULT Output(int64_t aStreamOffset,
RefPtr<MediaData>& aOutput) = 0;
@@ -136,10 +136,6 @@ private:
Atomic<bool> mIsFlushing;
bool mIsShutDown;
-
- // For telemetry
- bool mHasSuccessfulOutput = false;
- bool mRecordedError = false;
};
} // namespace mozilla
diff --git a/dom/media/platforms/wmf/WMFVideoMFTManager.cpp b/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
index 8a51f817a..a7633a7de 100644
--- a/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
+++ b/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
@@ -27,7 +27,6 @@
#include "mozilla/WindowsVersion.h"
#include "mozilla/Telemetry.h"
#include "nsPrintfCString.h"
-#include "MediaTelemetryConstants.h"
#include "GMPUtils.h" // For SplitAt. TODO: Move SplitAt to a central place.
#include "MP4Decoder.h"
#include "VPXDecoder.h"
diff --git a/dom/media/test/crashtests/1228484.html b/dom/media/test/crashtests/1228484.html
deleted file mode 100644
index 2b2e9b0f9..000000000
--- a/dom/media/test/crashtests/1228484.html
+++ /dev/null
@@ -1,13 +0,0 @@
-<!DOCTYPE html>
-<html>
-<head>
-<script>
-
-var htmlAudio = new Audio(URL.createObjectURL(new window.MediaSource()));
-
-(new window.AudioContext("ringer")).createMediaElementSource(htmlAudio);
-(new window.AudioContext("alarm")).createMediaElementSource(htmlAudio);
-
-</script>
-</head>
-</html>
diff --git a/dom/media/test/crashtests/crashtests.list b/dom/media/test/crashtests/crashtests.list
index 496fe5ee5..e4f25ca8d 100644
--- a/dom/media/test/crashtests/crashtests.list
+++ b/dom/media/test/crashtests/crashtests.list
@@ -81,8 +81,6 @@ load 1157994.html
load 1158427.html
load 1185176.html
load 1185192.html
-load 1223670.html
-load 1228484.html
load 1304948.html
load 1319486.html
load 1291702.html
diff --git a/dom/media/test/manifest.js b/dom/media/test/manifest.js
index 7e30cc97d..52e53a271 100644
--- a/dom/media/test/manifest.js
+++ b/dom/media/test/manifest.js
@@ -266,10 +266,10 @@ var gPlayTests = [
{ name:"small-shot.mp3", type:"audio/mpeg", duration:0.27 },
{ name:"owl.mp3", type:"audio/mpeg", duration:3.343 },
// owl.mp3 as above, but with something funny going on in the ID3v2 tag
- // that causes DirectShow to fail.
+ // that caused DirectShow to fail.
{ name:"owl-funny-id3.mp3", type:"audio/mpeg", duration:3.343 },
// owl.mp3 as above, but with something even funnier going on in the ID3v2 tag
- // that causes DirectShow to fail.
+ // that caused DirectShow to fail.
{ name:"owl-funnier-id3.mp3", type:"audio/mpeg", duration:3.343 },
// One second of silence with ~140KB of ID3 tags. Usually when the first MP3
// frame is at such a high offset into the file, MP3FrameParser will give up
diff --git a/dom/media/test/test_can_play_type_mpeg.html b/dom/media/test/test_can_play_type_mpeg.html
index 89e5fabef..514b5cc2f 100644
--- a/dom/media/test/test_can_play_type_mpeg.html
+++ b/dom/media/test/test_can_play_type_mpeg.html
@@ -151,8 +151,7 @@ var haveMp4 = (getPref("media.wmf.enabled") && IsWindowsVistaOrLater()) ||
check_mp4(document.getElementById('v'), haveMp4);
-var haveMp3 = getPref("media.directshow.enabled") ||
- (getPref("media.wmf.enabled") && IsWindowsVistaOrLater()) ||
+var haveMp3 = getPref("media.wmf.enabled") ||
(IsLinux() && getPref("media.ffmpeg.enabled")) ||
(IsSupportedAndroid() &&
((IsJellyBeanOrLater() && getPref("media.android-media-codec.enabled")) ||
diff --git a/dom/media/webaudio/AudioContext.cpp b/dom/media/webaudio/AudioContext.cpp
index 85842c811..d58441309 100755
--- a/dom/media/webaudio/AudioContext.cpp
+++ b/dom/media/webaudio/AudioContext.cpp
@@ -179,23 +179,13 @@ AudioContext::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto)
AudioContext::Constructor(const GlobalObject& aGlobal,
ErrorResult& aRv)
{
- return AudioContext::Constructor(aGlobal,
- AudioChannelService::GetDefaultAudioChannel(),
- aRv);
-}
-
-/* static */ already_AddRefed<AudioContext>
-AudioContext::Constructor(const GlobalObject& aGlobal,
- AudioChannel aChannel,
- ErrorResult& aRv)
-{
nsCOMPtr<nsPIDOMWindowInner> window = do_QueryInterface(aGlobal.GetAsSupports());
if (!window) {
aRv.Throw(NS_ERROR_FAILURE);
return nullptr;
}
- RefPtr<AudioContext> object = new AudioContext(window, false, aChannel);
+ RefPtr<AudioContext> object = new AudioContext(window, false, AudioChannelService::GetDefaultAudioChannel());
aRv = object->Init();
if (NS_WARN_IF(aRv.Failed())) {
return nullptr;
diff --git a/dom/media/webaudio/AudioContext.h b/dom/media/webaudio/AudioContext.h
index 069efa986..599debef8 100644
--- a/dom/media/webaudio/AudioContext.h
+++ b/dom/media/webaudio/AudioContext.h
@@ -151,12 +151,6 @@ public:
static already_AddRefed<AudioContext>
Constructor(const GlobalObject& aGlobal, ErrorResult& aRv);
- // Constructor for regular AudioContext. A default audio channel is needed.
- static already_AddRefed<AudioContext>
- Constructor(const GlobalObject& aGlobal,
- AudioChannel aChannel,
- ErrorResult& aRv);
-
// Constructor for offline AudioContext
static already_AddRefed<AudioContext>
Constructor(const GlobalObject& aGlobal,