author     Matt A. Tobin <mattatobin@localhost.localdomain>  2018-02-02 04:16:08 -0500
committer  Matt A. Tobin <mattatobin@localhost.localdomain>  2018-02-02 04:16:08 -0500
commit     5f8de423f190bbb79a62f804151bc24824fa32d8 (patch)
tree       10027f336435511475e392454359edea8e25895d /dom/media/mediasink
parent     49ee0794b5d912db1f95dce6eb52d781dc210db5 (diff)
Add m-esr52 at 52.6.0
Diffstat (limited to 'dom/media/mediasink')
-rw-r--r--  dom/media/mediasink/AudioSink.h               72
-rw-r--r--  dom/media/mediasink/AudioSinkWrapper.cpp     248
-rw-r--r--  dom/media/mediasink/AudioSinkWrapper.h       108
-rw-r--r--  dom/media/mediasink/DecodedAudioDataSink.cpp 561
-rw-r--r--  dom/media/mediasink/DecodedAudioDataSink.h   165
-rw-r--r--  dom/media/mediasink/DecodedStream.cpp        781
-rw-r--r--  dom/media/mediasink/DecodedStream.h          122
-rw-r--r--  dom/media/mediasink/MediaSink.h              133
-rw-r--r--  dom/media/mediasink/OutputStreamManager.cpp  134
-rw-r--r--  dom/media/mediasink/OutputStreamManager.h     80
-rw-r--r--  dom/media/mediasink/VideoSink.cpp            486
-rw-r--r--  dom/media/mediasink/VideoSink.h              160
-rw-r--r--  dom/media/mediasink/moz.build                 18
13 files changed, 3068 insertions, 0 deletions
diff --git a/dom/media/mediasink/AudioSink.h b/dom/media/mediasink/AudioSink.h
new file mode 100644
index 000000000..4f124d31f
--- /dev/null
+++ b/dom/media/mediasink/AudioSink.h
@@ -0,0 +1,72 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+#if !defined(AudioSink_h__)
+#define AudioSink_h__
+
+#include "mozilla/MozPromise.h"
+#include "mozilla/RefPtr.h"
+#include "nsISupportsImpl.h"
+
+#include "MediaSink.h"
+
+namespace mozilla {
+
+class MediaData;
+template <class T> class MediaQueue;
+
+namespace media {
+
+/*
+ * Defines the basic API through which callers operate on a derived sink
+ * instance or obtain information from it.
+ */
+class AudioSink {
+public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AudioSink)
+ AudioSink(MediaQueue<MediaData>& aAudioQueue)
+ : mAudioQueue(aAudioQueue)
+ {}
+
+ typedef MediaSink::PlaybackParams PlaybackParams;
+
+ // Return a promise which will be resolved when AudioSink finishes playing,
+ // or rejected if any error occurs.
+ virtual RefPtr<GenericPromise> Init(const PlaybackParams& aParams) = 0;
+
+ virtual int64_t GetEndTime() const = 0;
+ virtual int64_t GetPosition() = 0;
+
+ // Check whether we've pushed more frames to the audio
+ // hardware than it has played.
+ virtual bool HasUnplayedFrames() = 0;
+
+ // Shut down the AudioSink's resources.
+ virtual void Shutdown() = 0;
+
+ // Change audio playback setting.
+ virtual void SetVolume(double aVolume) = 0;
+ virtual void SetPlaybackRate(double aPlaybackRate) = 0;
+ virtual void SetPreservesPitch(bool aPreservesPitch) = 0;
+
+ // Change audio playback status: pause or resume.
+ virtual void SetPlaying(bool aPlaying) = 0;
+
+protected:
+ virtual ~AudioSink() {}
+
+ virtual MediaQueue<MediaData>& AudioQueue() const {
+ return mAudioQueue;
+ }
+
+ // The queue of audio data to play (whether it is plain, encoded, or
+ // encrypted depends on the subclass).
+ MediaQueue<MediaData>& mAudioQueue;
+};
+
+} // namespace media
+} // namespace mozilla
+
+#endif
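For illustration, the smallest possible subclass of the interface above is sketched below. NullAudioSink is a hypothetical name and is not part of this patch; a real sink would start audio output in Init() and resolve the returned promise when playback finishes, or reject it on error.

  // Hypothetical sketch, not part of this patch: the minimum a derived
  // class must implement.
  class NullAudioSink : public AudioSink {
  public:
    explicit NullAudioSink(MediaQueue<MediaData>& aQueue)
      : AudioSink(aQueue) {}

    RefPtr<GenericPromise> Init(const PlaybackParams& aParams) override
    {
      // Nothing to play, so report completion immediately.
      return GenericPromise::CreateAndResolve(true, __func__);
    }
    int64_t GetEndTime() const override { return 0; }
    int64_t GetPosition() override { return 0; }
    bool HasUnplayedFrames() override { return false; }
    void Shutdown() override {}
    void SetVolume(double aVolume) override {}
    void SetPlaybackRate(double aPlaybackRate) override {}
    void SetPreservesPitch(bool aPreservesPitch) override {}
    void SetPlaying(bool aPlaying) override {}
  };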
diff --git a/dom/media/mediasink/AudioSinkWrapper.cpp b/dom/media/mediasink/AudioSinkWrapper.cpp
new file mode 100644
index 000000000..a2dfcd8fb
--- /dev/null
+++ b/dom/media/mediasink/AudioSinkWrapper.cpp
@@ -0,0 +1,248 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "AudioSink.h"
+#include "AudioSinkWrapper.h"
+
+namespace mozilla {
+namespace media {
+
+AudioSinkWrapper::~AudioSinkWrapper()
+{
+}
+
+void
+AudioSinkWrapper::Shutdown()
+{
+ AssertOwnerThread();
+ MOZ_ASSERT(!mIsStarted, "Must be called after playback stopped.");
+ mCreator = nullptr;
+}
+
+const MediaSink::PlaybackParams&
+AudioSinkWrapper::GetPlaybackParams() const
+{
+ AssertOwnerThread();
+ return mParams;
+}
+
+void
+AudioSinkWrapper::SetPlaybackParams(const PlaybackParams& aParams)
+{
+ AssertOwnerThread();
+ if (mAudioSink) {
+ mAudioSink->SetVolume(aParams.mVolume);
+ mAudioSink->SetPlaybackRate(aParams.mPlaybackRate);
+ mAudioSink->SetPreservesPitch(aParams.mPreservesPitch);
+ }
+ mParams = aParams;
+}
+
+RefPtr<GenericPromise>
+AudioSinkWrapper::OnEnded(TrackType aType)
+{
+ AssertOwnerThread();
+ MOZ_ASSERT(mIsStarted, "Must be called after playback starts.");
+ if (aType == TrackInfo::kAudioTrack) {
+ return mEndPromise;
+ }
+ return nullptr;
+}
+
+int64_t
+AudioSinkWrapper::GetEndTime(TrackType aType) const
+{
+ AssertOwnerThread();
+ MOZ_ASSERT(mIsStarted, "Must be called after playback starts.");
+ if (aType == TrackInfo::kAudioTrack && mAudioSink) {
+ return mAudioSink->GetEndTime();
+ }
+ return -1;
+}
+
+int64_t
+AudioSinkWrapper::GetVideoPosition(TimeStamp aNow) const
+{
+ AssertOwnerThread();
+ MOZ_ASSERT(!mPlayStartTime.IsNull());
+ // Time elapsed since we started playing.
+ int64_t delta = (aNow - mPlayStartTime).ToMicroseconds();
+ // Take playback rate into account.
+ return mPlayDuration + delta * mParams.mPlaybackRate;
+}
+
+int64_t
+AudioSinkWrapper::GetPosition(TimeStamp* aTimeStamp) const
+{
+ AssertOwnerThread();
+ MOZ_ASSERT(mIsStarted, "Must be called after playback starts.");
+
+ int64_t pos = -1;
+ TimeStamp t = TimeStamp::Now();
+
+ if (!mAudioEnded) {
+ // Rely on the audio sink to report playback position when it is not ended.
+ pos = mAudioSink->GetPosition();
+ } else if (!mPlayStartTime.IsNull()) {
+ // Calculate playback position using system clock if we are still playing.
+ pos = GetVideoPosition(t);
+ } else {
+ // Return how long we've played if we are not playing.
+ pos = mPlayDuration;
+ }
+
+ if (aTimeStamp) {
+ *aTimeStamp = t;
+ }
+
+ return pos;
+}
+
+bool
+AudioSinkWrapper::HasUnplayedFrames(TrackType aType) const
+{
+ AssertOwnerThread();
+ return mAudioSink ? mAudioSink->HasUnplayedFrames() : false;
+}
+
+void
+AudioSinkWrapper::SetVolume(double aVolume)
+{
+ AssertOwnerThread();
+ mParams.mVolume = aVolume;
+ if (mAudioSink) {
+ mAudioSink->SetVolume(aVolume);
+ }
+}
+
+void
+AudioSinkWrapper::SetPlaybackRate(double aPlaybackRate)
+{
+ AssertOwnerThread();
+ if (!mAudioEnded) {
+ // Pass the playback rate to the audio sink. The underlying AudioStream
+ // will handle playback rate changes and report correct audio position.
+ mAudioSink->SetPlaybackRate(aPlaybackRate);
+ } else if (!mPlayStartTime.IsNull()) {
+ // Adjust playback duration and start time when we are still playing.
+ TimeStamp now = TimeStamp::Now();
+ mPlayDuration = GetVideoPosition(now);
+ mPlayStartTime = now;
+ }
+ // mParams.mPlaybackRate affects GetVideoPosition(). It must be updated
+ // after the calls to GetVideoPosition() above.
+ mParams.mPlaybackRate = aPlaybackRate;
+
+ // Do nothing when not playing. Changes in playback rate will be taken into
+ // account by GetVideoPosition().
+}
+
+void
+AudioSinkWrapper::SetPreservesPitch(bool aPreservesPitch)
+{
+ AssertOwnerThread();
+ mParams.mPreservesPitch = aPreservesPitch;
+ if (mAudioSink) {
+ mAudioSink->SetPreservesPitch(aPreservesPitch);
+ }
+}
+
+void
+AudioSinkWrapper::SetPlaying(bool aPlaying)
+{
+ AssertOwnerThread();
+
+ // Resume/pause matters only when playback started.
+ if (!mIsStarted) {
+ return;
+ }
+
+ if (mAudioSink) {
+ mAudioSink->SetPlaying(aPlaying);
+ }
+
+ if (aPlaying) {
+ MOZ_ASSERT(mPlayStartTime.IsNull());
+ mPlayStartTime = TimeStamp::Now();
+ } else {
+ // Remember how long we've played.
+ mPlayDuration = GetPosition();
+ // mPlayStartTime must be updated later since GetPosition()
+ // depends on the value of mPlayStartTime.
+ mPlayStartTime = TimeStamp();
+ }
+}
+
+void
+AudioSinkWrapper::Start(int64_t aStartTime, const MediaInfo& aInfo)
+{
+ AssertOwnerThread();
+ MOZ_ASSERT(!mIsStarted, "playback already started.");
+
+ mIsStarted = true;
+ mPlayDuration = aStartTime;
+ mPlayStartTime = TimeStamp::Now();
+
+ // No audio is equivalent to audio having ended before video starts.
+ mAudioEnded = !aInfo.HasAudio();
+
+ if (aInfo.HasAudio()) {
+ mAudioSink = mCreator->Create();
+ mEndPromise = mAudioSink->Init(mParams);
+
+ mAudioSinkPromise.Begin(mEndPromise->Then(
+ mOwnerThread.get(), __func__, this,
+ &AudioSinkWrapper::OnAudioEnded,
+ &AudioSinkWrapper::OnAudioEnded));
+ }
+}
+
+void
+AudioSinkWrapper::Stop()
+{
+ AssertOwnerThread();
+ MOZ_ASSERT(mIsStarted, "playback not started.");
+
+ mIsStarted = false;
+ mAudioEnded = true;
+
+ if (mAudioSink) {
+ mAudioSinkPromise.DisconnectIfExists();
+ mAudioSink->Shutdown();
+ mAudioSink = nullptr;
+ mEndPromise = nullptr;
+ }
+}
+
+bool
+AudioSinkWrapper::IsStarted() const
+{
+ AssertOwnerThread();
+ return mIsStarted;
+}
+
+bool
+AudioSinkWrapper::IsPlaying() const
+{
+ AssertOwnerThread();
+ return IsStarted() && !mPlayStartTime.IsNull();
+}
+
+void
+AudioSinkWrapper::OnAudioEnded()
+{
+ AssertOwnerThread();
+ mAudioSinkPromise.Complete();
+ mPlayDuration = GetPosition();
+ if (!mPlayStartTime.IsNull()) {
+ mPlayStartTime = TimeStamp::Now();
+ }
+ mAudioEnded = true;
+}
+
+} // namespace media
+} // namespace mozilla
+
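The position logic above falls back from the audio clock to the system clock once audio ends. Below is a standalone sketch of that extrapolation, using std::chrono and invented names rather than the tree's TimeStamp type:

  #include <chrono>
  #include <cstdint>

  // Standalone analogue of AudioSinkWrapper::GetVideoPosition(): once audio
  // has ended, the position is the duration played so far plus the wall-clock
  // time elapsed since playback (re)started, scaled by the playback rate.
  int64_t ExtrapolatedPositionUs(
      std::chrono::steady_clock::time_point aPlayStart,
      std::chrono::steady_clock::time_point aNow,
      int64_t aPlayDurationUs,
      double aPlaybackRate)
  {
    using namespace std::chrono;
    const int64_t deltaUs =
        duration_cast<microseconds>(aNow - aPlayStart).count();
    // Mirrors mPlayDuration + delta * mParams.mPlaybackRate above.
    return aPlayDurationUs + static_cast<int64_t>(deltaUs * aPlaybackRate);
  }

This is also why SetPlaying() and SetPlaybackRate() fold the elapsed time into mPlayDuration and reset mPlayStartTime before a state change: the extrapolation must never span a rate change.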
diff --git a/dom/media/mediasink/AudioSinkWrapper.h b/dom/media/mediasink/AudioSinkWrapper.h
new file mode 100644
index 000000000..46d402ee6
--- /dev/null
+++ b/dom/media/mediasink/AudioSinkWrapper.h
@@ -0,0 +1,108 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef AudioSinkWrapper_h_
+#define AudioSinkWrapper_h_
+
+#include "mozilla/AbstractThread.h"
+#include "mozilla/dom/AudioChannelBinding.h"
+#include "mozilla/RefPtr.h"
+#include "mozilla/TimeStamp.h"
+#include "mozilla/UniquePtr.h"
+
+#include "MediaSink.h"
+
+namespace mozilla {
+
+class MediaData;
+template <class T> class MediaQueue;
+
+namespace media {
+
+class AudioSink;
+
+/**
+ * A wrapper around AudioSink to provide the interface of MediaSink.
+ */
+class AudioSinkWrapper : public MediaSink {
+ // An AudioSink factory.
+ class Creator {
+ public:
+ virtual ~Creator() {}
+ virtual AudioSink* Create() = 0;
+ };
+
+ // Wraps a function object which creates AudioSinks.
+ template <typename Function>
+ class CreatorImpl : public Creator {
+ public:
+ explicit CreatorImpl(const Function& aFunc) : mFunction(aFunc) {}
+ AudioSink* Create() override { return mFunction(); }
+ private:
+ Function mFunction;
+ };
+
+public:
+ template <typename Function>
+ AudioSinkWrapper(AbstractThread* aOwnerThread, const Function& aFunc)
+ : mOwnerThread(aOwnerThread)
+ , mCreator(new CreatorImpl<Function>(aFunc))
+ , mIsStarted(false)
+ // Give an absurd value to make debugging easier if used before playback starts.
+ , mPlayDuration(INT64_MAX)
+ , mAudioEnded(true)
+ {}
+
+ const PlaybackParams& GetPlaybackParams() const override;
+ void SetPlaybackParams(const PlaybackParams& aParams) override;
+
+ RefPtr<GenericPromise> OnEnded(TrackType aType) override;
+ int64_t GetEndTime(TrackType aType) const override;
+ int64_t GetPosition(TimeStamp* aTimeStamp = nullptr) const override;
+ bool HasUnplayedFrames(TrackType aType) const override;
+
+ void SetVolume(double aVolume) override;
+ void SetPlaybackRate(double aPlaybackRate) override;
+ void SetPreservesPitch(bool aPreservesPitch) override;
+ void SetPlaying(bool aPlaying) override;
+
+ void Start(int64_t aStartTime, const MediaInfo& aInfo) override;
+ void Stop() override;
+ bool IsStarted() const override;
+ bool IsPlaying() const override;
+
+ void Shutdown() override;
+
+private:
+ virtual ~AudioSinkWrapper();
+
+ void AssertOwnerThread() const {
+ MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
+ }
+
+ int64_t GetVideoPosition(TimeStamp aNow) const;
+
+ void OnAudioEnded();
+
+ const RefPtr<AbstractThread> mOwnerThread;
+ UniquePtr<Creator> mCreator;
+ RefPtr<AudioSink> mAudioSink;
+ RefPtr<GenericPromise> mEndPromise;
+
+ bool mIsStarted;
+ PlaybackParams mParams;
+
+ TimeStamp mPlayStartTime;
+ int64_t mPlayDuration;
+
+ bool mAudioEnded;
+ MozPromiseRequestHolder<GenericPromise> mAudioSinkPromise;
+};
+
+} // namespace media
+} // namespace mozilla
+
+#endif // AudioSinkWrapper_h_
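Because the constructor is templated on the callable, any lambda can serve as the sink factory. A hypothetical construction site is sketched below; ownerThread, audioQueue, and the DecodedAudioDataSink arguments are assumptions about the caller, not something this header prescribes.

  // Hypothetical usage sketch. The lambda is type-erased behind Creator, so
  // Start() can create a fresh AudioSink on each (re)start of playback
  // without this header knowing the concrete sink type.
  RefPtr<MediaSink> sink = new AudioSinkWrapper(
    ownerThread,
    [=] () -> AudioSink* {
      return new DecodedAudioDataSink(
        ownerThread, audioQueue, startTime, audioInfo, audioChannel);
    });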
diff --git a/dom/media/mediasink/DecodedAudioDataSink.cpp b/dom/media/mediasink/DecodedAudioDataSink.cpp
new file mode 100644
index 000000000..e7fcffe4f
--- /dev/null
+++ b/dom/media/mediasink/DecodedAudioDataSink.cpp
@@ -0,0 +1,561 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsPrintfCString.h"
+#include "MediaQueue.h"
+#include "DecodedAudioDataSink.h"
+#include "VideoUtils.h"
+#include "AudioConverter.h"
+
+#include "mozilla/CheckedInt.h"
+#include "mozilla/DebugOnly.h"
+#include "MediaPrefs.h"
+
+namespace mozilla {
+
+extern LazyLogModule gMediaDecoderLog;
+#define SINK_LOG(msg, ...) \
+ MOZ_LOG(gMediaDecoderLog, LogLevel::Debug, \
+ ("DecodedAudioDataSink=%p " msg, this, ##__VA_ARGS__))
+#define SINK_LOG_V(msg, ...) \
+ MOZ_LOG(gMediaDecoderLog, LogLevel::Verbose, \
+ ("DecodedAudioDataSink=%p " msg, this, ##__VA_ARGS__))
+
+namespace media {
+
+// The number of audio frames used to fuzz rounding errors.
+static const int64_t AUDIO_FUZZ_FRAMES = 1;
+
+// The amount of audio we keep processed ahead of use, in microseconds.
+static const int32_t LOW_AUDIO_USECS = 300000;
+
+DecodedAudioDataSink::DecodedAudioDataSink(AbstractThread* aThread,
+ MediaQueue<MediaData>& aAudioQueue,
+ int64_t aStartTime,
+ const AudioInfo& aInfo,
+ dom::AudioChannel aChannel)
+ : AudioSink(aAudioQueue)
+ , mStartTime(aStartTime)
+ , mLastGoodPosition(0)
+ , mInfo(aInfo)
+ , mChannel(aChannel)
+ , mPlaying(true)
+ , mMonitor("DecodedAudioDataSink")
+ , mWritten(0)
+ , mErrored(false)
+ , mPlaybackComplete(false)
+ , mOwnerThread(aThread)
+ , mProcessedQueueLength(0)
+ , mFramesParsed(0)
+ , mLastEndTime(0)
+ , mIsAudioDataAudible(false)
+{
+ bool resampling = MediaPrefs::AudioSinkResampling();
+
+ if (resampling) {
+ mOutputRate = MediaPrefs::AudioSinkResampleRate();
+ } else if (mInfo.mRate == 44100 || mInfo.mRate == 48000) {
+ // The original rate is of good quality and we want to minimize unnecessary
+ // resampling. These two rates are the common cases, so keeping them
+ // unchanged minimizes audio quality regression, in the hope that content
+ // providers won't change rates mid-stream.
+ mOutputRate = mInfo.mRate;
+ } else {
+ // We will resample all data to match cubeb's preferred sampling rate.
+ mOutputRate = AudioStream::GetPreferredRate();
+ }
+ MOZ_DIAGNOSTIC_ASSERT(mOutputRate, "output rate can't be 0.");
+
+ bool monoAudioEnabled = MediaPrefs::MonoAudio();
+
+ mOutputChannels = monoAudioEnabled
+ ? 1 : (MediaPrefs::AudioSinkForceStereo() ? 2 : mInfo.mChannels);
+}
+
+DecodedAudioDataSink::~DecodedAudioDataSink()
+{
+}
+
+RefPtr<GenericPromise>
+DecodedAudioDataSink::Init(const PlaybackParams& aParams)
+{
+ MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
+
+ mAudioQueueListener = mAudioQueue.PushEvent().Connect(
+ mOwnerThread, this, &DecodedAudioDataSink::OnAudioPushed);
+ mAudioQueueFinishListener = mAudioQueue.FinishEvent().Connect(
+ mOwnerThread, this, &DecodedAudioDataSink::NotifyAudioNeeded);
+ mProcessedQueueListener = mProcessedQueue.PopEvent().Connect(
+ mOwnerThread, this, &DecodedAudioDataSink::OnAudioPopped);
+
+ // Ensure at least one audio packet will be popped from AudioQueue and be
+ // ready to be played.
+ NotifyAudioNeeded();
+ RefPtr<GenericPromise> p = mEndPromise.Ensure(__func__);
+ nsresult rv = InitializeAudioStream(aParams);
+ if (NS_FAILED(rv)) {
+ mEndPromise.Reject(rv, __func__);
+ }
+ return p;
+}
+
+int64_t
+DecodedAudioDataSink::GetPosition()
+{
+ int64_t pos;
+ if (mAudioStream &&
+ (pos = mAudioStream->GetPosition()) >= 0) {
+ NS_ASSERTION(pos >= mLastGoodPosition,
+ "AudioStream position shouldn't go backward");
+ // Update the last good position when we got a good one.
+ if (pos >= mLastGoodPosition) {
+ mLastGoodPosition = pos;
+ }
+ }
+
+ return mStartTime + mLastGoodPosition;
+}
+
+bool
+DecodedAudioDataSink::HasUnplayedFrames()
+{
+ // Experimentation suggests that GetPositionInFrames() is zero-indexed,
+ // so we need to add 1 here before comparing it to mWritten.
+ int64_t total;
+ {
+ MonitorAutoLock mon(mMonitor);
+ total = mWritten + (mCursor.get() ? mCursor->Available() : 0);
+ }
+ return mProcessedQueue.GetSize() ||
+ (mAudioStream && mAudioStream->GetPositionInFrames() + 1 < total);
+}
+
+void
+DecodedAudioDataSink::Shutdown()
+{
+ MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
+
+ mAudioQueueListener.Disconnect();
+ mAudioQueueFinishListener.Disconnect();
+ mProcessedQueueListener.Disconnect();
+
+ if (mAudioStream) {
+ mAudioStream->Shutdown();
+ mAudioStream = nullptr;
+ }
+ mProcessedQueue.Reset();
+ mProcessedQueue.Finish();
+ mEndPromise.ResolveIfExists(true, __func__);
+}
+
+void
+DecodedAudioDataSink::SetVolume(double aVolume)
+{
+ if (mAudioStream) {
+ mAudioStream->SetVolume(aVolume);
+ }
+}
+
+void
+DecodedAudioDataSink::SetPlaybackRate(double aPlaybackRate)
+{
+ MOZ_ASSERT(aPlaybackRate != 0, "Don't set the playbackRate to 0 on AudioStream");
+ if (mAudioStream) {
+ mAudioStream->SetPlaybackRate(aPlaybackRate);
+ }
+}
+
+void
+DecodedAudioDataSink::SetPreservesPitch(bool aPreservesPitch)
+{
+ if (mAudioStream) {
+ mAudioStream->SetPreservesPitch(aPreservesPitch);
+ }
+}
+
+void
+DecodedAudioDataSink::SetPlaying(bool aPlaying)
+{
+ if (!mAudioStream || mPlaying == aPlaying || mPlaybackComplete) {
+ return;
+ }
+ // pause/resume AudioStream as necessary.
+ if (!aPlaying) {
+ mAudioStream->Pause();
+ } else if (aPlaying) {
+ mAudioStream->Resume();
+ }
+ mPlaying = aPlaying;
+}
+
+nsresult
+DecodedAudioDataSink::InitializeAudioStream(const PlaybackParams& aParams)
+{
+ mAudioStream = new AudioStream(*this);
+ nsresult rv = mAudioStream->Init(mOutputChannels, mOutputRate, mChannel);
+ if (NS_FAILED(rv)) {
+ mAudioStream->Shutdown();
+ mAudioStream = nullptr;
+ return rv;
+ }
+
+ // Set playback params before calling Start() so they can take effect
+ // as soon as the 1st DataCallback of the AudioStream fires.
+ mAudioStream->SetVolume(aParams.mVolume);
+ mAudioStream->SetPlaybackRate(aParams.mPlaybackRate);
+ mAudioStream->SetPreservesPitch(aParams.mPreservesPitch);
+ mAudioStream->Start();
+
+ return NS_OK;
+}
+
+int64_t
+DecodedAudioDataSink::GetEndTime() const
+{
+ int64_t written;
+ {
+ MonitorAutoLock mon(mMonitor);
+ written = mWritten;
+ }
+ CheckedInt64 playedUsecs = FramesToUsecs(written, mOutputRate) + mStartTime;
+ if (!playedUsecs.isValid()) {
+ NS_WARNING("Int overflow calculating audio end time");
+ return -1;
+ }
+ // As we may be resampling, rounding errors may occur. Ensure we never get
+ // past the original end time.
+ return std::min<int64_t>(mLastEndTime, playedUsecs.value());
+}
+
+UniquePtr<AudioStream::Chunk>
+DecodedAudioDataSink::PopFrames(uint32_t aFrames)
+{
+ class Chunk : public AudioStream::Chunk {
+ public:
+ Chunk(AudioData* aBuffer, uint32_t aFrames, AudioDataValue* aData)
+ : mBuffer(aBuffer), mFrames(aFrames), mData(aData) {}
+ Chunk() : mFrames(0), mData(nullptr) {}
+ const AudioDataValue* Data() const { return mData; }
+ uint32_t Frames() const { return mFrames; }
+ uint32_t Channels() const { return mBuffer ? mBuffer->mChannels : 0; }
+ uint32_t Rate() const { return mBuffer ? mBuffer->mRate : 0; }
+ AudioDataValue* GetWritable() const { return mData; }
+ private:
+ const RefPtr<AudioData> mBuffer;
+ const uint32_t mFrames;
+ AudioDataValue* const mData;
+ };
+
+ class SilentChunk : public AudioStream::Chunk {
+ public:
+ SilentChunk(uint32_t aFrames, uint32_t aChannels, uint32_t aRate)
+ : mFrames(aFrames)
+ , mChannels(aChannels)
+ , mRate(aRate)
+ , mData(MakeUnique<AudioDataValue[]>(aChannels * aFrames)) {
+ memset(mData.get(), 0, aChannels * aFrames * sizeof(AudioDataValue));
+ }
+ const AudioDataValue* Data() const { return mData.get(); }
+ uint32_t Frames() const { return mFrames; }
+ uint32_t Channels() const { return mChannels; }
+ uint32_t Rate() const { return mRate; }
+ AudioDataValue* GetWritable() const { return mData.get(); }
+ private:
+ const uint32_t mFrames;
+ const uint32_t mChannels;
+ const uint32_t mRate;
+ UniquePtr<AudioDataValue[]> mData;
+ };
+
+ bool needPopping = false;
+ if (!mCurrentData) {
+ // No data in the queue. Return an empty chunk.
+ if (!mProcessedQueue.GetSize()) {
+ return MakeUnique<Chunk>();
+ }
+
+ // We need to update our values prior to popping the processed queue, in
+ // order to prevent the pop event from firing too early (before
+ // mProcessedQueueLength is updated) and to prevent HasUnplayedFrames()
+ // from incorrectly returning true in the interval between when
+ // mProcessedQueue is read and mWritten is updated.
+ needPopping = true;
+ mCurrentData = mProcessedQueue.PeekFront();
+ {
+ MonitorAutoLock mon(mMonitor);
+ mCursor = MakeUnique<AudioBufferCursor>(mCurrentData->mAudioData.get(),
+ mCurrentData->mChannels,
+ mCurrentData->mFrames);
+ }
+ MOZ_ASSERT(mCurrentData->mFrames > 0);
+ mProcessedQueueLength -=
+ FramesToUsecs(mCurrentData->mFrames, mOutputRate).value();
+ }
+
+ auto framesToPop = std::min(aFrames, mCursor->Available());
+
+ SINK_LOG_V("playing audio at time=%lld offset=%u length=%u",
+ mCurrentData->mTime, mCurrentData->mFrames - mCursor->Available(), framesToPop);
+
+ UniquePtr<AudioStream::Chunk> chunk =
+ MakeUnique<Chunk>(mCurrentData, framesToPop, mCursor->Ptr());
+
+ {
+ MonitorAutoLock mon(mMonitor);
+ mWritten += framesToPop;
+ mCursor->Advance(framesToPop);
+ }
+
+ // All frames are popped. Reset mCurrentData so we can pop new elements from
+ // the audio queue in next calls to PopFrames().
+ if (!mCursor->Available()) {
+ mCurrentData = nullptr;
+ }
+
+ if (needPopping) {
+ // We can now safely pop the audio packet from the processed queue.
+ // This will fire the popped event, triggering a call to NotifyAudioNeeded.
+ RefPtr<AudioData> releaseMe = mProcessedQueue.PopFront();
+ CheckIsAudible(releaseMe);
+ }
+
+ return chunk;
+}
+
+bool
+DecodedAudioDataSink::Ended() const
+{
+ // Return true when an error is encountered so AudioStream can start draining.
+ return mProcessedQueue.IsFinished() || mErrored;
+}
+
+void
+DecodedAudioDataSink::Drained()
+{
+ SINK_LOG("Drained");
+ mPlaybackComplete = true;
+ mEndPromise.ResolveIfExists(true, __func__);
+}
+
+void
+DecodedAudioDataSink::CheckIsAudible(const AudioData* aData)
+{
+ MOZ_ASSERT(aData);
+
+ bool isAudible = aData->IsAudible();
+ if (isAudible != mIsAudioDataAudible) {
+ mIsAudioDataAudible = isAudible;
+ mAudibleEvent.Notify(mIsAudioDataAudible);
+ }
+}
+
+void
+DecodedAudioDataSink::OnAudioPopped(const RefPtr<MediaData>& aSample)
+{
+ SINK_LOG_V("AudioStream has used an audio packet.");
+ NotifyAudioNeeded();
+}
+
+void
+DecodedAudioDataSink::OnAudioPushed(const RefPtr<MediaData>& aSample)
+{
+ SINK_LOG_V("One new audio packet available.");
+ NotifyAudioNeeded();
+}
+
+void
+DecodedAudioDataSink::NotifyAudioNeeded()
+{
+ MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn(),
+ "Not called from the owner's thread");
+
+ // Always ensure we have two processed packets pending to allow for
+ // processing latency.
+ while (AudioQueue().GetSize() && (AudioQueue().IsFinished() ||
+ mProcessedQueueLength < LOW_AUDIO_USECS ||
+ mProcessedQueue.GetSize() < 2)) {
+ RefPtr<AudioData> data =
+ dont_AddRef(AudioQueue().PopFront().take()->As<AudioData>());
+
+ // Ignore the element with 0 frames and try next.
+ if (!data->mFrames) {
+ continue;
+ }
+
+ if (!mConverter ||
+ (data->mRate != mConverter->InputConfig().Rate() ||
+ data->mChannels != mConverter->InputConfig().Channels())) {
+ SINK_LOG_V("Audio format changed from %u@%uHz to %u@%uHz",
+ mConverter? mConverter->InputConfig().Channels() : 0,
+ mConverter ? mConverter->InputConfig().Rate() : 0,
+ data->mChannels, data->mRate);
+
+ DrainConverter();
+
+ // mFramesParsed indicates the current playtime in frames at the current
+ // input sampling rate. Recalculate it per the new sampling rate.
+ if (mFramesParsed) {
+ // SaferMultDiv() minimizes the risk of intermediate overflow.
+ uint32_t oldRate = mConverter->InputConfig().Rate();
+ uint32_t newRate = data->mRate;
+ CheckedInt64 result = SaferMultDiv(mFramesParsed, newRate, oldRate);
+ if (!result.isValid()) {
+ NS_WARNING("Int overflow in DecodedAudioDataSink");
+ mErrored = true;
+ return;
+ }
+ mFramesParsed = result.value();
+ }
+
+ mConverter =
+ MakeUnique<AudioConverter>(
+ AudioConfig(data->mChannels, data->mRate),
+ AudioConfig(mOutputChannels, mOutputRate));
+ }
+
+ // See if there's a gap in the audio. If there is, push silence into the
+ // audio hardware, so we can play across the gap.
+ // Calculate the timestamp of the next chunk of audio, in number of
+ // samples.
+ CheckedInt64 sampleTime = UsecsToFrames(data->mTime - mStartTime,
+ data->mRate);
+ // Calculate the gap, in frames, between the next packet's start time and
+ // the number of frames we have parsed so far.
+ CheckedInt64 missingFrames = sampleTime - mFramesParsed;
+
+ if (!missingFrames.isValid()) {
+ NS_WARNING("Int overflow in DecodedAudioDataSink");
+ mErrored = true;
+ return;
+ }
+
+ if (missingFrames.value() > AUDIO_FUZZ_FRAMES) {
+ // The next audio packet begins some time after the end of the last packet
+ // we pushed to the audio hardware. We must push silence into the audio
+ // hardware so that the next audio packet begins playback at the correct
+ // time.
+ missingFrames = std::min<int64_t>(INT32_MAX, missingFrames.value());
+ mFramesParsed += missingFrames.value();
+
+ // We need to calculate how many frames are missing at the output rate.
+ missingFrames =
+ SaferMultDiv(missingFrames.value(), mOutputRate, data->mRate);
+ if (!missingFrames.isValid()) {
+ NS_WARNING("Int overflow in DecodedAudioDataSink");
+ mErrored = true;
+ return;
+ }
+
+ // We need to insert silence; first use drained frames, if any.
+ missingFrames -= DrainConverter(missingFrames.value());
+ // Insert silence if still needed.
+ if (missingFrames.value()) {
+ AlignedAudioBuffer silenceData(missingFrames.value() * mOutputChannels);
+ if (!silenceData) {
+ NS_WARNING("OOM in DecodedAudioDataSink");
+ mErrored = true;
+ return;
+ }
+ RefPtr<AudioData> silence = CreateAudioFromBuffer(Move(silenceData), data);
+ PushProcessedAudio(silence);
+ }
+ }
+
+ mLastEndTime = data->GetEndTime();
+ mFramesParsed += data->mFrames;
+
+ if (mConverter->InputConfig() != mConverter->OutputConfig()) {
+ // We must ensure that the buffer's length corresponds exactly to the number
+ // of frames, in case one of the audio producers over-allocated the buffer.
+ AlignedAudioBuffer buffer(Move(data->mAudioData));
+ buffer.SetLength(size_t(data->mFrames) * data->mChannels);
+
+ AlignedAudioBuffer convertedData =
+ mConverter->Process(AudioSampleBuffer(Move(buffer))).Forget();
+ data = CreateAudioFromBuffer(Move(convertedData), data);
+ }
+ if (PushProcessedAudio(data)) {
+ mLastProcessedPacket = Some(data);
+ }
+ }
+
+ if (AudioQueue().IsFinished()) {
+ // We have reached the end of the data, drain the resampler.
+ DrainConverter();
+ mProcessedQueue.Finish();
+ }
+}
+
+uint32_t
+DecodedAudioDataSink::PushProcessedAudio(AudioData* aData)
+{
+ if (!aData || !aData->mFrames) {
+ return 0;
+ }
+ mProcessedQueue.Push(aData);
+ mProcessedQueueLength += FramesToUsecs(aData->mFrames, mOutputRate).value();
+ return aData->mFrames;
+}
+
+already_AddRefed<AudioData>
+DecodedAudioDataSink::CreateAudioFromBuffer(AlignedAudioBuffer&& aBuffer,
+ AudioData* aReference)
+{
+ uint32_t frames = aBuffer.Length() / mOutputChannels;
+ if (!frames) {
+ return nullptr;
+ }
+ CheckedInt64 duration = FramesToUsecs(frames, mOutputRate);
+ if (!duration.isValid()) {
+ NS_WARNING("Int overflow in DecodedAudioDataSink");
+ mErrored = true;
+ return nullptr;
+ }
+ RefPtr<AudioData> data =
+ new AudioData(aReference->mOffset,
+ aReference->mTime,
+ duration.value(),
+ frames,
+ Move(aBuffer),
+ mOutputChannels,
+ mOutputRate);
+ return data.forget();
+}
+
+uint32_t
+DecodedAudioDataSink::DrainConverter(uint32_t aMaxFrames)
+{
+ MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
+
+ if (!mConverter || !mLastProcessedPacket || !aMaxFrames) {
+ // nothing to drain.
+ return 0;
+ }
+
+ RefPtr<AudioData> lastPacket = mLastProcessedPacket.ref();
+ mLastProcessedPacket.reset();
+
+ // To drain we simply provide an empty packet to the audio converter.
+ AlignedAudioBuffer convertedData =
+ mConverter->Process(AudioSampleBuffer(AlignedAudioBuffer())).Forget();
+
+ uint32_t frames = convertedData.Length() / mOutputChannels;
+ if (!convertedData.SetLength(std::min(frames, aMaxFrames) * mOutputChannels)) {
+ // This can never happen as we are only ever reducing the length of convertedData.
+ mErrored = true;
+ return 0;
+ }
+
+ RefPtr<AudioData> data =
+ CreateAudioFromBuffer(Move(convertedData), lastPacket);
+ if (!data) {
+ return 0;
+ }
+ mProcessedQueue.Push(data);
+ return data->mFrames;
+}
+
+} // namespace media
+} // namespace mozilla
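The gap-filling path in NotifyAudioNeeded() reduces to frame arithmetic: convert the next packet's timestamp to a frame index at the input rate, subtract the frames parsed so far, and re-express any gap beyond the fuzz threshold at the output rate. A standalone sketch of that calculation follows; the names are invented, and the patch itself uses CheckedInt64 and SaferMultDiv() for overflow safety where this sketch uses plain arithmetic.

  #include <cstdint>

  // Standalone analogue of the silence computation in NotifyAudioNeeded().
  // Returns how many output-rate frames of silence to insert before a packet
  // starting at aPacketTimeUs, given aFramesParsed input frames so far.
  int64_t SilentOutputFrames(int64_t aPacketTimeUs, int64_t aStartTimeUs,
                             int64_t aFramesParsed,
                             uint32_t aInRate, uint32_t aOutRate)
  {
    const int64_t kFuzzFrames = 1; // Matches AUDIO_FUZZ_FRAMES.
    // The frame index at which the packet should begin, at the input rate.
    const int64_t sampleTime =
        (aPacketTimeUs - aStartTimeUs) * aInRate / 1000000;
    const int64_t missing = sampleTime - aFramesParsed;
    if (missing <= kFuzzFrames) {
      return 0; // Within rounding fuzz; no gap to fill.
    }
    // Re-express the gap at the output rate.
    return missing * aOutRate / aInRate;
  }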
diff --git a/dom/media/mediasink/DecodedAudioDataSink.h b/dom/media/mediasink/DecodedAudioDataSink.h
new file mode 100644
index 000000000..36412984a
--- /dev/null
+++ b/dom/media/mediasink/DecodedAudioDataSink.h
@@ -0,0 +1,165 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+#if !defined(DecodedAudioDataSink_h__)
+#define DecodedAudioDataSink_h__
+
+#include "AudioSink.h"
+#include "AudioStream.h"
+#include "MediaEventSource.h"
+#include "MediaQueue.h"
+#include "MediaInfo.h"
+#include "mozilla/RefPtr.h"
+#include "nsISupportsImpl.h"
+
+#include "mozilla/dom/AudioChannelBinding.h"
+#include "mozilla/Atomics.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/MozPromise.h"
+#include "mozilla/Monitor.h"
+
+namespace mozilla {
+
+class AudioConverter;
+
+namespace media {
+
+class DecodedAudioDataSink : public AudioSink,
+ private AudioStream::DataSource {
+public:
+ DecodedAudioDataSink(AbstractThread* aThread,
+ MediaQueue<MediaData>& aAudioQueue,
+ int64_t aStartTime,
+ const AudioInfo& aInfo,
+ dom::AudioChannel aChannel);
+
+ // Return a promise which will be resolved when DecodedAudioDataSink
+ // finishes playing, or rejected if any error occurs.
+ RefPtr<GenericPromise> Init(const PlaybackParams& aParams) override;
+
+ /*
+  * The public functions are not thread-safe.
+  * They must be called on the task queue of the MDSM only.
+  */
+ int64_t GetPosition() override;
+ int64_t GetEndTime() const override;
+
+ // Check whether we've pushed more frames to the audio hardware than it has
+ // played.
+ bool HasUnplayedFrames() override;
+
+ // Shut down the DecodedAudioDataSink's resources.
+ void Shutdown() override;
+
+ void SetVolume(double aVolume) override;
+ void SetPlaybackRate(double aPlaybackRate) override;
+ void SetPreservesPitch(bool aPreservesPitch) override;
+ void SetPlaying(bool aPlaying) override;
+
+ MediaEventSource<bool>& AudibleEvent() {
+ return mAudibleEvent;
+ }
+
+private:
+ virtual ~DecodedAudioDataSink();
+
+ // Allocate and initialize mAudioStream. Returns NS_OK on success.
+ nsresult InitializeAudioStream(const PlaybackParams& aParams);
+
+ // Interface of AudioStream::DataSource.
+ // Called on the callback thread of cubeb.
+ UniquePtr<AudioStream::Chunk> PopFrames(uint32_t aFrames) override;
+ bool Ended() const override;
+ void Drained() override;
+
+ void CheckIsAudible(const AudioData* aData);
+
+ // The audio stream resource. Used on the task queue of MDSM only.
+ RefPtr<AudioStream> mAudioStream;
+
+ // The presentation time, in microseconds, of the first audio frame that
+ // was played. We can add this to the audio stream position to determine
+ // the current audio time.
+ const int64_t mStartTime;
+
+ // Keep the last good position returned from the audio stream. Used to ensure
+ // the position returned by GetPosition() is monotonically increasing in
+ // spite of audio stream errors. Used on the task queue of MDSM only.
+ int64_t mLastGoodPosition;
+
+ const AudioInfo mInfo;
+
+ const dom::AudioChannel mChannel;
+
+ // Used on the task queue of MDSM only.
+ bool mPlaying;
+
+ MozPromiseHolder<GenericPromise> mEndPromise;
+
+ /*
+ * Members to implement AudioStream::DataSource.
+ * Used on the callback thread of cubeb.
+ */
+ // The AudioData from which AudioStream::DataSource is currently reading.
+ RefPtr<AudioData> mCurrentData;
+
+ // Monitor protecting access to mCursor and mWritten.
+ // mCursor is created/destroyed on the cubeb thread, and we must ensure
+ // that mWritten and mCursor->Available() are modified together (written
+ // on the cubeb thread, and read on the MDSM task queue).
+ mutable Monitor mMonitor;
+ // Keep track of the read position of mCurrentData.
+ UniquePtr<AudioBufferCursor> mCursor;
+
+ // PCM frames written to the stream so far.
+ int64_t mWritten;
+
+ // True if there was any error in processing audio data, such as overflow.
+ Atomic<bool> mErrored;
+
+ // Set on the callback thread of cubeb once the stream has drained.
+ Atomic<bool> mPlaybackComplete;
+
+ const RefPtr<AbstractThread> mOwnerThread;
+
+ // Audio Processing objects and methods
+ void OnAudioPopped(const RefPtr<MediaData>& aSample);
+ void OnAudioPushed(const RefPtr<MediaData>& aSample);
+ void NotifyAudioNeeded();
+ // Drain the converter and add the output to the processed audio queue.
+ // A maximum of aMaxFrames will be added.
+ uint32_t DrainConverter(uint32_t aMaxFrames = UINT32_MAX);
+ already_AddRefed<AudioData> CreateAudioFromBuffer(AlignedAudioBuffer&& aBuffer,
+ AudioData* aReference);
+ // Add data to the processed queue, update mProcessedQueueLength and
+ // return the number of frames added.
+ uint32_t PushProcessedAudio(AudioData* aData);
+ UniquePtr<AudioConverter> mConverter;
+ MediaQueue<AudioData> mProcessedQueue;
+ // Length in microseconds of the ProcessedQueue
+ Atomic<int32_t> mProcessedQueueLength;
+ MediaEventListener mAudioQueueListener;
+ MediaEventListener mAudioQueueFinishListener;
+ MediaEventListener mProcessedQueueListener;
+ // Number of frames processed from AudioQueue(). Used to determine gaps in
+ // the input stream. It indicates the time in frames since playback started
+ // at the current input framerate.
+ int64_t mFramesParsed;
+ Maybe<RefPtr<AudioData>> mLastProcessedPacket;
+ int64_t mLastEndTime;
+ // Never modified after construction.
+ uint32_t mOutputRate;
+ uint32_t mOutputChannels;
+
+ // True when audio is producing audible sound, false when audio is silent.
+ bool mIsAudioDataAudible;
+
+ MediaEventProducer<bool> mAudibleEvent;
+};
+
+} // namespace media
+} // namespace mozilla
+
+#endif
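A consumer would observe audibility changes through AudibleEvent() in the same way this patch wires up its own queue listeners. A hypothetical hookup, where ownerThread and the lambda body are assumptions:

  // Hypothetical sketch: subscribing to audibility changes on the owner
  // thread. The listener should be disconnected during teardown, as
  // Shutdown() does for the queue listeners above.
  MediaEventListener audibleListener =
    audioSink->AudibleEvent().Connect(ownerThread, [] (bool aAudible) {
      // React to the stream becoming audible or silent, e.g. by updating
      // a tab audio indicator.
    });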
diff --git a/dom/media/mediasink/DecodedStream.cpp b/dom/media/mediasink/DecodedStream.cpp
new file mode 100644
index 000000000..9501a6cde
--- /dev/null
+++ b/dom/media/mediasink/DecodedStream.cpp
@@ -0,0 +1,781 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/CheckedInt.h"
+#include "mozilla/gfx/Point.h"
+#include "mozilla/SyncRunnable.h"
+
+#include "AudioSegment.h"
+#include "DecodedStream.h"
+#include "MediaData.h"
+#include "MediaQueue.h"
+#include "MediaStreamGraph.h"
+#include "MediaStreamListener.h"
+#include "OutputStreamManager.h"
+#include "SharedBuffer.h"
+#include "VideoSegment.h"
+#include "VideoUtils.h"
+
+namespace mozilla {
+
+#undef DUMP_LOG
+#define DUMP_LOG(x, ...) NS_DebugBreak(NS_DEBUG_WARNING, nsPrintfCString(x, ##__VA_ARGS__).get(), nullptr, nullptr, -1)
+
+/*
+ * A container class to make it easier to pass the playback info all the
+ * way to DecodedStreamGraphListener from DecodedStream.
+ */
+struct PlaybackInfoInit {
+ int64_t mStartTime;
+ MediaInfo mInfo;
+};
+
+class DecodedStreamGraphListener : public MediaStreamListener {
+public:
+ DecodedStreamGraphListener(MediaStream* aStream,
+ MozPromiseHolder<GenericPromise>&& aPromise)
+ : mMutex("DecodedStreamGraphListener::mMutex")
+ , mStream(aStream)
+ {
+ mFinishPromise = Move(aPromise);
+ }
+
+ void NotifyOutput(MediaStreamGraph* aGraph, GraphTime aCurrentTime) override
+ {
+ MutexAutoLock lock(mMutex);
+ if (mStream) {
+ int64_t t = mStream->StreamTimeToMicroseconds(
+ mStream->GraphTimeToStreamTime(aCurrentTime));
+ mOnOutput.Notify(t);
+ }
+ }
+
+ void NotifyEvent(MediaStreamGraph* aGraph, MediaStreamGraphEvent event) override
+ {
+ if (event == MediaStreamGraphEvent::EVENT_FINISHED) {
+ nsCOMPtr<nsIRunnable> runnable =
+ NewRunnableMethod(this, &DecodedStreamGraphListener::DoNotifyFinished);
+ aGraph->DispatchToMainThreadAfterStreamStateUpdate(runnable.forget());
+ }
+ }
+
+ void DoNotifyFinished()
+ {
+ MOZ_ASSERT(NS_IsMainThread());
+ mFinishPromise.ResolveIfExists(true, __func__);
+ }
+
+ void Forget()
+ {
+ RefPtr<DecodedStreamGraphListener> self = this;
+ AbstractThread::MainThread()->Dispatch(NS_NewRunnableFunction([self] () {
+ MOZ_ASSERT(NS_IsMainThread());
+ self->mFinishPromise.ResolveIfExists(true, __func__);
+ }));
+ MutexAutoLock lock(mMutex);
+ mStream = nullptr;
+ }
+
+ MediaEventSource<int64_t>& OnOutput()
+ {
+ return mOnOutput;
+ }
+
+private:
+ MediaEventProducer<int64_t> mOnOutput;
+
+ Mutex mMutex;
+ // Members below are protected by mMutex.
+ RefPtr<MediaStream> mStream;
+ // Main thread only.
+ MozPromiseHolder<GenericPromise> mFinishPromise;
+};
+
+static void
+UpdateStreamSuspended(MediaStream* aStream, bool aBlocking)
+{
+ if (NS_IsMainThread()) {
+ if (aBlocking) {
+ aStream->Suspend();
+ } else {
+ aStream->Resume();
+ }
+ } else {
+ nsCOMPtr<nsIRunnable> r;
+ if (aBlocking) {
+ r = NewRunnableMethod(aStream, &MediaStream::Suspend);
+ } else {
+ r = NewRunnableMethod(aStream, &MediaStream::Resume);
+ }
+ AbstractThread::MainThread()->Dispatch(r.forget());
+ }
+}
+
+/*
+ * All MediaStream-related data is protected by the decoder's monitor.
+ * We have at most one DecodedStreamData per MediaDecoder. Its stream
+ * is used as the input for each ProcessedMediaStream created by calls to
+ * captureStream(UntilEnded). Seeking creates a new source stream, as does
+ * replaying after the input as ended. In the latter case, the new source is
+ * not connected to streams created by captureStreamUntilEnded.
+ */
+class DecodedStreamData {
+public:
+ DecodedStreamData(OutputStreamManager* aOutputStreamManager,
+ PlaybackInfoInit&& aInit,
+ MozPromiseHolder<GenericPromise>&& aPromise);
+ ~DecodedStreamData();
+ void SetPlaying(bool aPlaying);
+ MediaEventSource<int64_t>& OnOutput();
+ void Forget();
+ void DumpDebugInfo();
+
+ /* The following group of fields are protected by the decoder's monitor
+ * and can be read or written on any thread.
+ */
+ // Count of audio frames written to the stream
+ int64_t mAudioFramesWritten;
+ // mNextVideoTime is the end timestamp for the last packet sent to the stream.
+ // Therefore video packets starting at or after this time need to be copied
+ // to the output stream.
+ int64_t mNextVideoTime; // microseconds
+ int64_t mNextAudioTime; // microseconds
+ // The last video image sent to the stream. Useful if we need to replicate
+ // the image.
+ RefPtr<layers::Image> mLastVideoImage;
+ gfx::IntSize mLastVideoImageDisplaySize;
+ bool mHaveSentFinish;
+ bool mHaveSentFinishAudio;
+ bool mHaveSentFinishVideo;
+
+ // The decoder is responsible for calling Destroy() on this stream.
+ const RefPtr<SourceMediaStream> mStream;
+ const RefPtr<DecodedStreamGraphListener> mListener;
+ bool mPlaying;
+ // True if we need to send a compensation video frame to ensure the
+ // StreamTime keeps advancing.
+ bool mEOSVideoCompensation;
+
+ const RefPtr<OutputStreamManager> mOutputStreamManager;
+};
+
+DecodedStreamData::DecodedStreamData(OutputStreamManager* aOutputStreamManager,
+ PlaybackInfoInit&& aInit,
+ MozPromiseHolder<GenericPromise>&& aPromise)
+ : mAudioFramesWritten(0)
+ , mNextVideoTime(aInit.mStartTime)
+ , mNextAudioTime(aInit.mStartTime)
+ , mHaveSentFinish(false)
+ , mHaveSentFinishAudio(false)
+ , mHaveSentFinishVideo(false)
+ , mStream(aOutputStreamManager->Graph()->CreateSourceStream())
+ // DecodedStreamGraphListener will resolve this promise.
+ , mListener(new DecodedStreamGraphListener(mStream, Move(aPromise)))
+ // mPlaying is initially true because MDSM won't start playback until playing
+ // becomes true. This is consistent with the settings of AudioSink.
+ , mPlaying(true)
+ , mEOSVideoCompensation(false)
+ , mOutputStreamManager(aOutputStreamManager)
+{
+ mStream->AddListener(mListener);
+ mOutputStreamManager->Connect(mStream);
+
+ // Initialize tracks.
+ if (aInit.mInfo.HasAudio()) {
+ mStream->AddAudioTrack(aInit.mInfo.mAudio.mTrackId,
+ aInit.mInfo.mAudio.mRate,
+ 0, new AudioSegment());
+ }
+ if (aInit.mInfo.HasVideo()) {
+ mStream->AddTrack(aInit.mInfo.mVideo.mTrackId, 0, new VideoSegment());
+ }
+}
+
+DecodedStreamData::~DecodedStreamData()
+{
+ mOutputStreamManager->Disconnect();
+ mStream->Destroy();
+}
+
+MediaEventSource<int64_t>&
+DecodedStreamData::OnOutput()
+{
+ return mListener->OnOutput();
+}
+
+void
+DecodedStreamData::SetPlaying(bool aPlaying)
+{
+ if (mPlaying != aPlaying) {
+ mPlaying = aPlaying;
+ UpdateStreamSuspended(mStream, !mPlaying);
+ }
+}
+
+void
+DecodedStreamData::Forget()
+{
+ mListener->Forget();
+}
+
+void
+DecodedStreamData::DumpDebugInfo()
+{
+ DUMP_LOG(
+ "DecodedStreamData=%p mPlaying=%d mAudioFramesWritten=%lld"
+ "mNextAudioTime=%lld mNextVideoTime=%lld mHaveSentFinish=%d"
+ "mHaveSentFinishAudio=%d mHaveSentFinishVideo=%d",
+ this, mPlaying, mAudioFramesWritten, mNextAudioTime, mNextVideoTime,
+ mHaveSentFinish, mHaveSentFinishAudio, mHaveSentFinishVideo);
+}
+
+DecodedStream::DecodedStream(AbstractThread* aOwnerThread,
+ MediaQueue<MediaData>& aAudioQueue,
+ MediaQueue<MediaData>& aVideoQueue,
+ OutputStreamManager* aOutputStreamManager,
+ const bool& aSameOrigin,
+ const PrincipalHandle& aPrincipalHandle)
+ : mOwnerThread(aOwnerThread)
+ , mOutputStreamManager(aOutputStreamManager)
+ , mPlaying(false)
+ , mSameOrigin(aSameOrigin)
+ , mPrincipalHandle(aPrincipalHandle)
+ , mAudioQueue(aAudioQueue)
+ , mVideoQueue(aVideoQueue)
+{
+}
+
+DecodedStream::~DecodedStream()
+{
+ MOZ_ASSERT(mStartTime.isNothing(), "playback should've ended.");
+}
+
+const media::MediaSink::PlaybackParams&
+DecodedStream::GetPlaybackParams() const
+{
+ AssertOwnerThread();
+ return mParams;
+}
+
+void
+DecodedStream::SetPlaybackParams(const PlaybackParams& aParams)
+{
+ AssertOwnerThread();
+ mParams = aParams;
+}
+
+RefPtr<GenericPromise>
+DecodedStream::OnEnded(TrackType aType)
+{
+ AssertOwnerThread();
+ MOZ_ASSERT(mStartTime.isSome());
+
+ if (aType == TrackInfo::kAudioTrack && mInfo.HasAudio()) {
+ // TODO: we should return a promise which is resolved when the audio track
+ // is finished. For now this promise is resolved when the whole stream is
+ // finished.
+ return mFinishPromise;
+ } else if (aType == TrackInfo::kVideoTrack && mInfo.HasVideo()) {
+ return mFinishPromise;
+ }
+ return nullptr;
+}
+
+void
+DecodedStream::Start(int64_t aStartTime, const MediaInfo& aInfo)
+{
+ AssertOwnerThread();
+ MOZ_ASSERT(mStartTime.isNothing(), "playback already started.");
+
+ mStartTime.emplace(aStartTime);
+ mLastOutputTime = 0;
+ mInfo = aInfo;
+ mPlaying = true;
+ ConnectListener();
+
+ class R : public Runnable {
+ typedef MozPromiseHolder<GenericPromise> Promise;
+ public:
+ R(PlaybackInfoInit&& aInit, Promise&& aPromise, OutputStreamManager* aManager)
+ : mInit(Move(aInit)), mOutputStreamManager(aManager)
+ {
+ mPromise = Move(aPromise);
+ }
+ NS_IMETHOD Run() override
+ {
+ MOZ_ASSERT(NS_IsMainThread());
+ // No need to create a source stream when there are no output streams. This
+ // happens when RemoveOutput() is called immediately after StartPlayback().
+ if (!mOutputStreamManager->Graph()) {
+ // Resolve the promise to indicate the end of playback.
+ mPromise.Resolve(true, __func__);
+ return NS_OK;
+ }
+ mData = MakeUnique<DecodedStreamData>(
+ mOutputStreamManager, Move(mInit), Move(mPromise));
+ return NS_OK;
+ }
+ UniquePtr<DecodedStreamData> ReleaseData()
+ {
+ return Move(mData);
+ }
+ private:
+ PlaybackInfoInit mInit;
+ Promise mPromise;
+ RefPtr<OutputStreamManager> mOutputStreamManager;
+ UniquePtr<DecodedStreamData> mData;
+ };
+
+ MozPromiseHolder<GenericPromise> promise;
+ mFinishPromise = promise.Ensure(__func__);
+ PlaybackInfoInit init {
+ aStartTime, aInfo
+ };
+ nsCOMPtr<nsIRunnable> r = new R(Move(init), Move(promise), mOutputStreamManager);
+ nsCOMPtr<nsIThread> mainThread = do_GetMainThread();
+ SyncRunnable::DispatchToThread(mainThread, r);
+ mData = static_cast<R*>(r.get())->ReleaseData();
+
+ if (mData) {
+ mOutputListener = mData->OnOutput().Connect(
+ mOwnerThread, this, &DecodedStream::NotifyOutput);
+ mData->SetPlaying(mPlaying);
+ SendData();
+ }
+}
+
+void
+DecodedStream::Stop()
+{
+ AssertOwnerThread();
+ MOZ_ASSERT(mStartTime.isSome(), "playback not started.");
+
+ mStartTime.reset();
+ DisconnectListener();
+ mFinishPromise = nullptr;
+
+ // Clear mData immediately when this playback session ends so we won't
+ // send data to the wrong stream in SendData() in the next playback session.
+ DestroyData(Move(mData));
+}
+
+bool
+DecodedStream::IsStarted() const
+{
+ AssertOwnerThread();
+ return mStartTime.isSome();
+}
+
+bool
+DecodedStream::IsPlaying() const
+{
+ AssertOwnerThread();
+ return IsStarted() && mPlaying;
+}
+
+void
+DecodedStream::DestroyData(UniquePtr<DecodedStreamData> aData)
+{
+ AssertOwnerThread();
+
+ if (!aData) {
+ return;
+ }
+
+ mOutputListener.Disconnect();
+
+ DecodedStreamData* data = aData.release();
+ data->Forget();
+ nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction([=] () {
+ delete data;
+ });
+ AbstractThread::MainThread()->Dispatch(r.forget());
+}
+
+void
+DecodedStream::SetPlaying(bool aPlaying)
+{
+ AssertOwnerThread();
+
+ // Resume/pause matters only when playback started.
+ if (mStartTime.isNothing()) {
+ return;
+ }
+
+ mPlaying = aPlaying;
+ if (mData) {
+ mData->SetPlaying(aPlaying);
+ }
+}
+
+void
+DecodedStream::SetVolume(double aVolume)
+{
+ AssertOwnerThread();
+ mParams.mVolume = aVolume;
+}
+
+void
+DecodedStream::SetPlaybackRate(double aPlaybackRate)
+{
+ AssertOwnerThread();
+ mParams.mPlaybackRate = aPlaybackRate;
+}
+
+void
+DecodedStream::SetPreservesPitch(bool aPreservesPitch)
+{
+ AssertOwnerThread();
+ mParams.mPreservesPitch = aPreservesPitch;
+}
+
+static void
+SendStreamAudio(DecodedStreamData* aStream, int64_t aStartTime,
+ MediaData* aData, AudioSegment* aOutput, uint32_t aRate,
+ const PrincipalHandle& aPrincipalHandle)
+{
+ // The amount of audio frames that is used to fuzz rounding errors.
+ static const int64_t AUDIO_FUZZ_FRAMES = 1;
+
+ MOZ_ASSERT(aData);
+ AudioData* audio = aData->As<AudioData>();
+ // This logic has to mimic AudioSink closely to make sure we write
+ // the exact same silences.
+ CheckedInt64 audioWrittenOffset = aStream->mAudioFramesWritten +
+ UsecsToFrames(aStartTime, aRate);
+ CheckedInt64 frameOffset = UsecsToFrames(audio->mTime, aRate);
+
+ if (!audioWrittenOffset.isValid() ||
+ !frameOffset.isValid() ||
+ // ignore packet that we've already processed
+ audio->GetEndTime() <= aStream->mNextAudioTime) {
+ return;
+ }
+
+ if (audioWrittenOffset.value() + AUDIO_FUZZ_FRAMES < frameOffset.value()) {
+ int64_t silentFrames = frameOffset.value() - audioWrittenOffset.value();
+ // Write silence to catch up
+ AudioSegment silence;
+ silence.InsertNullDataAtStart(silentFrames);
+ aStream->mAudioFramesWritten += silentFrames;
+ audioWrittenOffset += silentFrames;
+ aOutput->AppendFrom(&silence);
+ }
+
+ // Always write the whole sample without truncation to be consistent with
+ // DecodedAudioDataSink::PlayFromAudioQueue()
+ audio->EnsureAudioBuffer();
+ RefPtr<SharedBuffer> buffer = audio->mAudioBuffer;
+ AudioDataValue* bufferData = static_cast<AudioDataValue*>(buffer->Data());
+ AutoTArray<const AudioDataValue*, 2> channels;
+ for (uint32_t i = 0; i < audio->mChannels; ++i) {
+ channels.AppendElement(bufferData + i * audio->mFrames);
+ }
+ aOutput->AppendFrames(buffer.forget(), channels, audio->mFrames, aPrincipalHandle);
+ aStream->mAudioFramesWritten += audio->mFrames;
+
+ aStream->mNextAudioTime = audio->GetEndTime();
+}
+
+void
+DecodedStream::SendAudio(double aVolume, bool aIsSameOrigin,
+ const PrincipalHandle& aPrincipalHandle)
+{
+ AssertOwnerThread();
+
+ if (!mInfo.HasAudio()) {
+ return;
+ }
+
+ AudioSegment output;
+ uint32_t rate = mInfo.mAudio.mRate;
+ AutoTArray<RefPtr<MediaData>,10> audio;
+ TrackID audioTrackId = mInfo.mAudio.mTrackId;
+ SourceMediaStream* sourceStream = mData->mStream;
+
+ // It's OK to hold references to the AudioData because AudioData
+ // is ref-counted.
+ mAudioQueue.GetElementsAfter(mData->mNextAudioTime, &audio);
+ for (uint32_t i = 0; i < audio.Length(); ++i) {
+ SendStreamAudio(mData.get(), mStartTime.ref(), audio[i], &output, rate,
+ aPrincipalHandle);
+ }
+
+ output.ApplyVolume(aVolume);
+
+ if (!aIsSameOrigin) {
+ output.ReplaceWithDisabled();
+ }
+
+ // |mNextAudioTime| is updated as we process each audio sample in
+ // SendStreamAudio(). This is consistent with how |mNextVideoTime|
+ // is updated for video samples.
+ if (output.GetDuration() > 0) {
+ sourceStream->AppendToTrack(audioTrackId, &output);
+ }
+
+ if (mAudioQueue.IsFinished() && !mData->mHaveSentFinishAudio) {
+ sourceStream->EndTrack(audioTrackId);
+ mData->mHaveSentFinishAudio = true;
+ }
+}
+
+static void
+WriteVideoToMediaStream(MediaStream* aStream,
+ layers::Image* aImage,
+ int64_t aEndMicroseconds,
+ int64_t aStartMicroseconds,
+ const mozilla::gfx::IntSize& aIntrinsicSize,
+ const TimeStamp& aTimeStamp,
+ VideoSegment* aOutput,
+ const PrincipalHandle& aPrincipalHandle)
+{
+ RefPtr<layers::Image> image = aImage;
+ StreamTime duration =
+ aStream->MicrosecondsToStreamTimeRoundDown(aEndMicroseconds) -
+ aStream->MicrosecondsToStreamTimeRoundDown(aStartMicroseconds);
+ aOutput->AppendFrame(image.forget(), duration, aIntrinsicSize,
+ aPrincipalHandle, false, aTimeStamp);
+}
+
+static bool
+ZeroDurationAtLastChunk(VideoSegment& aInput)
+{
+ // Get the last video frame's start time in VideoSegment aInput.
+ // If the start time is equal to the duration of aInput, the last video
+ // frame's duration is zero.
+ StreamTime lastVideoStartTime;
+ aInput.GetLastFrame(&lastVideoStartTime);
+ return lastVideoStartTime == aInput.GetDuration();
+}
+
+void
+DecodedStream::SendVideo(bool aIsSameOrigin, const PrincipalHandle& aPrincipalHandle)
+{
+ AssertOwnerThread();
+
+ if (!mInfo.HasVideo()) {
+ return;
+ }
+
+ VideoSegment output;
+ TrackID videoTrackId = mInfo.mVideo.mTrackId;
+ AutoTArray<RefPtr<MediaData>, 10> video;
+ SourceMediaStream* sourceStream = mData->mStream;
+
+ // It's OK to hold references to the VideoData because VideoData
+ // is ref-counted.
+ mVideoQueue.GetElementsAfter(mData->mNextVideoTime, &video);
+
+ // tracksStartTimeStamp might be null when the SourceMediaStream has not
+ // yet been added to the MediaStreamGraph.
+ TimeStamp tracksStartTimeStamp = sourceStream->GetStreamTracksStrartTimeStamp();
+ if (tracksStartTimeStamp.IsNull()) {
+ tracksStartTimeStamp = TimeStamp::Now();
+ }
+
+ for (uint32_t i = 0; i < video.Length(); ++i) {
+ VideoData* v = video[i]->As<VideoData>();
+
+ if (mData->mNextVideoTime < v->mTime) {
+ // Write last video frame to catch up. mLastVideoImage can be null here
+ // which is fine, it just means there's no video.
+
+ // TODO: |mLastVideoImage| should come from the last image rendered
+ // by the state machine. This will avoid the black frame when capture
+ // happens in the middle of playback (especially in the middle of a
+ // video frame). E.g. if we have a video frame that is 30 sec long
+ // and capture happens at 15 sec, we'll have to append a black frame
+ // that is 15 sec long.
+ WriteVideoToMediaStream(sourceStream, mData->mLastVideoImage, v->mTime,
+ mData->mNextVideoTime, mData->mLastVideoImageDisplaySize,
+ tracksStartTimeStamp + TimeDuration::FromMicroseconds(v->mTime),
+ &output, aPrincipalHandle);
+ mData->mNextVideoTime = v->mTime;
+ }
+
+ if (mData->mNextVideoTime < v->GetEndTime()) {
+ WriteVideoToMediaStream(sourceStream, v->mImage, v->GetEndTime(),
+ mData->mNextVideoTime, v->mDisplay,
+ tracksStartTimeStamp + TimeDuration::FromMicroseconds(v->GetEndTime()),
+ &output, aPrincipalHandle);
+ mData->mNextVideoTime = v->GetEndTime();
+ mData->mLastVideoImage = v->mImage;
+ mData->mLastVideoImageDisplaySize = v->mDisplay;
+ }
+ }
+
+ // Check the output is not empty.
+ if (output.GetLastFrame()) {
+ mData->mEOSVideoCompensation = ZeroDurationAtLastChunk(output);
+ }
+
+ if (!aIsSameOrigin) {
+ output.ReplaceWithDisabled();
+ }
+
+ if (output.GetDuration() > 0) {
+ sourceStream->AppendToTrack(videoTrackId, &output);
+ }
+
+ if (mVideoQueue.IsFinished() && !mData->mHaveSentFinishVideo) {
+ if (mData->mEOSVideoCompensation) {
+ VideoSegment endSegment;
+ // Calculate the deviation: one stream tick expressed in microseconds,
+ // the smallest nonzero duration we can append.
+ int64_t deviation_usec = sourceStream->StreamTimeToMicroseconds(1);
+ WriteVideoToMediaStream(sourceStream, mData->mLastVideoImage,
+ mData->mNextVideoTime + deviation_usec, mData->mNextVideoTime,
+ mData->mLastVideoImageDisplaySize,
+ tracksStartTimeStamp + TimeDuration::FromMicroseconds(mData->mNextVideoTime + deviation_usec),
+ &endSegment, aPrincipalHandle);
+ mData->mNextVideoTime += deviation_usec;
+ MOZ_ASSERT(endSegment.GetDuration() > 0);
+ if (!aIsSameOrigin) {
+ endSegment.ReplaceWithDisabled();
+ }
+ sourceStream->AppendToTrack(videoTrackId, &endSegment);
+ }
+ sourceStream->EndTrack(videoTrackId);
+ mData->mHaveSentFinishVideo = true;
+ }
+}
+
+void
+DecodedStream::AdvanceTracks()
+{
+ AssertOwnerThread();
+
+ StreamTime endPosition = 0;
+
+ if (mInfo.HasAudio()) {
+ StreamTime audioEnd = mData->mStream->TicksToTimeRoundDown(
+ mInfo.mAudio.mRate, mData->mAudioFramesWritten);
+ endPosition = std::max(endPosition, audioEnd);
+ }
+
+ if (mInfo.HasVideo()) {
+ StreamTime videoEnd = mData->mStream->MicrosecondsToStreamTimeRoundDown(
+ mData->mNextVideoTime - mStartTime.ref());
+ endPosition = std::max(endPosition, videoEnd);
+ }
+
+ if (!mData->mHaveSentFinish) {
+ mData->mStream->AdvanceKnownTracksTime(endPosition);
+ }
+}
+
+void
+DecodedStream::SendData()
+{
+ AssertOwnerThread();
+ MOZ_ASSERT(mStartTime.isSome(), "Must be called after StartPlayback()");
+
+ // Not yet created on the main thread. MDSM will try again later.
+ if (!mData) {
+ return;
+ }
+
+ // Nothing to do when the stream is finished.
+ if (mData->mHaveSentFinish) {
+ return;
+ }
+
+ SendAudio(mParams.mVolume, mSameOrigin, mPrincipalHandle);
+ SendVideo(mSameOrigin, mPrincipalHandle);
+ AdvanceTracks();
+
+ bool finished = (!mInfo.HasAudio() || mAudioQueue.IsFinished()) &&
+ (!mInfo.HasVideo() || mVideoQueue.IsFinished());
+
+ if (finished && !mData->mHaveSentFinish) {
+ mData->mHaveSentFinish = true;
+ mData->mStream->Finish();
+ }
+}
+
+int64_t
+DecodedStream::GetEndTime(TrackType aType) const
+{
+ AssertOwnerThread();
+ if (aType == TrackInfo::kAudioTrack && mInfo.HasAudio() && mData) {
+ CheckedInt64 t = mStartTime.ref() +
+ FramesToUsecs(mData->mAudioFramesWritten, mInfo.mAudio.mRate);
+ if (t.isValid()) {
+ return t.value();
+ }
+ } else if (aType == TrackInfo::kVideoTrack && mData) {
+ return mData->mNextVideoTime;
+ }
+ return -1;
+}
+
+int64_t
+DecodedStream::GetPosition(TimeStamp* aTimeStamp) const
+{
+ AssertOwnerThread();
+ // This is only called after MDSM starts playback. So mStartTime is
+ // guaranteed to be something.
+ MOZ_ASSERT(mStartTime.isSome());
+ if (aTimeStamp) {
+ *aTimeStamp = TimeStamp::Now();
+ }
+ return mStartTime.ref() + mLastOutputTime;
+}
+
+void
+DecodedStream::NotifyOutput(int64_t aTime)
+{
+ AssertOwnerThread();
+ mLastOutputTime = aTime;
+ int64_t currentTime = GetPosition();
+
+ // Remove audio samples that have been played by MSG from the queue.
+ RefPtr<MediaData> a = mAudioQueue.PeekFront();
+  while (a && a->mTime < currentTime) {
+ RefPtr<MediaData> releaseMe = mAudioQueue.PopFront();
+ a = mAudioQueue.PeekFront();
+ }
+}
+
+void
+DecodedStream::ConnectListener()
+{
+ AssertOwnerThread();
+
+ mAudioPushListener = mAudioQueue.PushEvent().Connect(
+ mOwnerThread, this, &DecodedStream::SendData);
+ mAudioFinishListener = mAudioQueue.FinishEvent().Connect(
+ mOwnerThread, this, &DecodedStream::SendData);
+ mVideoPushListener = mVideoQueue.PushEvent().Connect(
+ mOwnerThread, this, &DecodedStream::SendData);
+ mVideoFinishListener = mVideoQueue.FinishEvent().Connect(
+ mOwnerThread, this, &DecodedStream::SendData);
+}
+
+void
+DecodedStream::DisconnectListener()
+{
+ AssertOwnerThread();
+
+ mAudioPushListener.Disconnect();
+ mVideoPushListener.Disconnect();
+ mAudioFinishListener.Disconnect();
+ mVideoFinishListener.Disconnect();
+}
+
+void
+DecodedStream::DumpDebugInfo()
+{
+ AssertOwnerThread();
+ DUMP_LOG(
+ "DecodedStream=%p mStartTime=%lld mLastOutputTime=%lld mPlaying=%d mData=%p",
+ this, mStartTime.valueOr(-1), mLastOutputTime, mPlaying, mData.get());
+ if (mData) {
+ mData->DumpDebugInfo();
+ }
+}
+
+} // namespace mozilla
diff --git a/dom/media/mediasink/DecodedStream.h b/dom/media/mediasink/DecodedStream.h
new file mode 100644
index 000000000..f2c606bc4
--- /dev/null
+++ b/dom/media/mediasink/DecodedStream.h
@@ -0,0 +1,122 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef DecodedStream_h_
+#define DecodedStream_h_
+
+#include "MediaEventSource.h"
+#include "MediaInfo.h"
+#include "MediaSink.h"
+
+#include "mozilla/AbstractThread.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/MozPromise.h"
+#include "mozilla/RefPtr.h"
+#include "mozilla/UniquePtr.h"
+
+namespace mozilla {
+
+class DecodedStreamData;
+class MediaData;
+class MediaStream;
+class OutputStreamManager;
+struct PlaybackInfoInit;
+class ProcessedMediaStream;
+class TimeStamp;
+
+template <class T> class MediaQueue;
+
+class DecodedStream : public media::MediaSink {
+ using media::MediaSink::PlaybackParams;
+
+public:
+ DecodedStream(AbstractThread* aOwnerThread,
+ MediaQueue<MediaData>& aAudioQueue,
+ MediaQueue<MediaData>& aVideoQueue,
+ OutputStreamManager* aOutputStreamManager,
+ const bool& aSameOrigin,
+ const PrincipalHandle& aPrincipalHandle);
+
+ // MediaSink functions.
+ const PlaybackParams& GetPlaybackParams() const override;
+ void SetPlaybackParams(const PlaybackParams& aParams) override;
+
+ RefPtr<GenericPromise> OnEnded(TrackType aType) override;
+ int64_t GetEndTime(TrackType aType) const override;
+ int64_t GetPosition(TimeStamp* aTimeStamp = nullptr) const override;
+ bool HasUnplayedFrames(TrackType aType) const override
+ {
+ // TODO: implement this.
+ return false;
+ }
+
+ void SetVolume(double aVolume) override;
+ void SetPlaybackRate(double aPlaybackRate) override;
+ void SetPreservesPitch(bool aPreservesPitch) override;
+ void SetPlaying(bool aPlaying) override;
+
+ void Start(int64_t aStartTime, const MediaInfo& aInfo) override;
+ void Stop() override;
+ bool IsStarted() const override;
+ bool IsPlaying() const override;
+
+ void DumpDebugInfo() override;
+
+protected:
+ virtual ~DecodedStream();
+
+private:
+ void DestroyData(UniquePtr<DecodedStreamData> aData);
+ void AdvanceTracks();
+ void SendAudio(double aVolume, bool aIsSameOrigin, const PrincipalHandle& aPrincipalHandle);
+ void SendVideo(bool aIsSameOrigin, const PrincipalHandle& aPrincipalHandle);
+ void SendData();
+ void NotifyOutput(int64_t aTime);
+
+ void AssertOwnerThread() const {
+ MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
+ }
+
+ void ConnectListener();
+ void DisconnectListener();
+
+ const RefPtr<AbstractThread> mOwnerThread;
+
+ /*
+ * Main thread only members.
+ */
+ // Data about MediaStreams that are being fed by the decoder.
+ const RefPtr<OutputStreamManager> mOutputStreamManager;
+
+ /*
+ * Worker thread only members.
+ */
+ UniquePtr<DecodedStreamData> mData;
+ RefPtr<GenericPromise> mFinishPromise;
+
+ bool mPlaying;
+ const bool& mSameOrigin; // valid until Shutdown() is called.
+ const PrincipalHandle& mPrincipalHandle; // valid until Shutdown() is called.
+
+ PlaybackParams mParams;
+
+ Maybe<int64_t> mStartTime;
+ int64_t mLastOutputTime = 0; // microseconds
+ MediaInfo mInfo;
+
+ MediaQueue<MediaData>& mAudioQueue;
+ MediaQueue<MediaData>& mVideoQueue;
+
+ MediaEventListener mAudioPushListener;
+ MediaEventListener mVideoPushListener;
+ MediaEventListener mAudioFinishListener;
+ MediaEventListener mVideoFinishListener;
+ MediaEventListener mOutputListener;
+};
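+
+// An illustrative construction on the state machine side (a sketch with
+// hypothetical member names, not part of this patch). Note that the bool and
+// PrincipalHandle arguments are held by reference and must outlive the
+// DecodedStream:
+//
+//   RefPtr<DecodedStream> sink = new DecodedStream(
+//     mTaskQueue, mAudioQueue, mVideoQueue, mOutputStreamManager,
+//     mSameOriginFlag, mPrincipalHandle);
+//   sink->Start(aStartTimeUs, aMediaInfo);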
+
+} // namespace mozilla
+
+#endif // DecodedStream_h_
diff --git a/dom/media/mediasink/MediaSink.h b/dom/media/mediasink/MediaSink.h
new file mode 100644
index 000000000..09b79149e
--- /dev/null
+++ b/dom/media/mediasink/MediaSink.h
@@ -0,0 +1,133 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef MediaSink_h_
+#define MediaSink_h_
+
+#include "mozilla/RefPtr.h"
+#include "mozilla/MozPromise.h"
+#include "nsISupportsImpl.h"
+#include "MediaInfo.h"
+
+namespace mozilla {
+
+class TimeStamp;
+
+namespace media {
+
+/**
+ * A consumer of audio/video data which plays audio and video tracks and
+ * manages A/V sync between them.
+ *
+ * A typical sink sends audio/video outputs to the speaker and screen.
+ * However, there are also sinks which capture the output of a media element
+ * and send the output to a MediaStream.
+ *
+ * This class is used to move A/V sync management and audio/video rendering
+ * out of MDSM so it is possible for subclasses to do external rendering using
+ * specific hardware which is required by TV projects and CDM.
+ *
+ * Note this class is not thread-safe and should be called from the state
+ * machine thread only.
+ */
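+//
+// A sketch of the intended call order, following the contracts documented on
+// the methods below ("sink" stands for a hypothetical concrete instance):
+//
+//   RefPtr<MediaSink> sink = ...;
+//   sink->Start(aStartTime, aInfo); // only while stopped
+//   sink->SetPlaying(true);         // resume playback
+//   sink->SetPlaying(false);        // pause playback
+//   sink->Stop();                   // only after Start()
+//   sink->Shutdown();               // only after playback stops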
+class MediaSink {
+public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaSink);
+ typedef mozilla::TrackInfo::TrackType TrackType;
+
+ struct PlaybackParams {
+ PlaybackParams()
+      : mVolume(1.0), mPlaybackRate(1.0), mPreservesPitch(true) {}
+ double mVolume;
+ double mPlaybackRate;
+ bool mPreservesPitch;
+ };
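+
+  // For example, a caller halving the volume while keeping the other
+  // parameters intact (an illustrative sketch, not part of this patch):
+  //
+  //   PlaybackParams params = sink->GetPlaybackParams();
+  //   params.mVolume *= 0.5;
+  //   sink->SetPlaybackParams(params);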
+
+ // Return the playback parameters of this sink.
+ // Can be called in any state.
+ virtual const PlaybackParams& GetPlaybackParams() const = 0;
+
+ // Set the playback parameters of this sink.
+ // Can be called in any state.
+ virtual void SetPlaybackParams(const PlaybackParams& aParams) = 0;
+
+ // Return a promise which is resolved when the track finishes
+ // or null if no such track.
+ // Must be called after playback starts.
+ virtual RefPtr<GenericPromise> OnEnded(TrackType aType) = 0;
+
+ // Return the end time of the audio/video data that has been consumed
+ // or -1 if no such track.
+ // Must be called after playback starts.
+ virtual int64_t GetEndTime(TrackType aType) const = 0;
+
+ // Return playback position of the media.
+ // Since A/V sync is always maintained by this sink, there is no need to
+ // specify whether we want to get audio or video position.
+ // aTimeStamp returns the timeStamp corresponding to the returned position
+ // which is used by the compositor to derive the render time of video frames.
+ // Must be called after playback starts.
+ virtual int64_t GetPosition(TimeStamp* aTimeStamp = nullptr) const = 0;
+
+ // Return true if there are data consumed but not played yet.
+ // Can be called in any state.
+ virtual bool HasUnplayedFrames(TrackType aType) const = 0;
+
+ // Set volume of the audio track.
+ // Do nothing if this sink has no audio track.
+ // Can be called in any state.
+ virtual void SetVolume(double aVolume) {}
+
+ // Set the playback rate.
+ // Can be called in any state.
+ virtual void SetPlaybackRate(double aPlaybackRate) {}
+
+ // Whether to preserve pitch of the audio track.
+ // Do nothing if this sink has no audio track.
+ // Can be called in any state.
+ virtual void SetPreservesPitch(bool aPreservesPitch) {}
+
+  // Pause/resume the playback. Only works after playback starts.
+ virtual void SetPlaying(bool aPlaying) = 0;
+
+ // Single frame rendering operation may need to be done before playback
+ // started (1st frame) or right after seek completed or playback stopped.
+ // Do nothing if this sink has no video track. Can be called in any state.
+  virtual void Redraw(const VideoInfo& aInfo) {}
+
+ // Begin a playback session with the provided start time and media info.
+ // Must be called when playback is stopped.
+ virtual void Start(int64_t aStartTime, const MediaInfo& aInfo) = 0;
+
+ // Finish a playback session.
+ // Must be called after playback starts.
+ virtual void Stop() = 0;
+
+ // Return true if playback has started.
+ // Can be called in any state.
+ virtual bool IsStarted() const = 0;
+
+  // Return true if playback is started and not paused, false otherwise.
+ // Can be called in any state.
+ virtual bool IsPlaying() const = 0;
+
+ // Called on the state machine thread to shut down the sink. All resources
+ // allocated by this sink should be released.
+ // Must be called after playback stopped.
+ virtual void Shutdown() {}
+
+ // Dump debugging information to the logs.
+ // Can be called in any phase.
+ virtual void DumpDebugInfo() {}
+
+protected:
+ virtual ~MediaSink() {}
+};
+
+} // namespace media
+} // namespace mozilla
+
+#endif // MediaSink_h_
diff --git a/dom/media/mediasink/OutputStreamManager.cpp b/dom/media/mediasink/OutputStreamManager.cpp
new file mode 100644
index 000000000..d5685837a
--- /dev/null
+++ b/dom/media/mediasink/OutputStreamManager.cpp
@@ -0,0 +1,134 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "MediaStreamGraph.h"
+#include "OutputStreamManager.h"
+
+namespace mozilla {
+
+OutputStreamData::~OutputStreamData()
+{
+ MOZ_ASSERT(NS_IsMainThread());
+ // Break the connection to the input stream if necessary.
+ if (mPort) {
+ mPort->Destroy();
+ }
+}
+
+void
+OutputStreamData::Init(OutputStreamManager* aOwner, ProcessedMediaStream* aStream)
+{
+ mOwner = aOwner;
+ mStream = aStream;
+}
+
+bool
+OutputStreamData::Connect(MediaStream* aStream)
+{
+ MOZ_ASSERT(NS_IsMainThread());
+ MOZ_ASSERT(!mPort, "Already connected?");
+
+ if (mStream->IsDestroyed()) {
+ return false;
+ }
+
+ mPort = mStream->AllocateInputPort(aStream);
+ return true;
+}
+
+bool
+OutputStreamData::Disconnect()
+{
+ MOZ_ASSERT(NS_IsMainThread());
+
+ // During cycle collection, DOMMediaStream can be destroyed and send
+ // its Destroy message before this decoder is destroyed. So we have to
+ // be careful not to send any messages after the Destroy().
+ if (mStream->IsDestroyed()) {
+ return false;
+ }
+
+ // Disconnect the existing port if necessary.
+ if (mPort) {
+ mPort->Destroy();
+ mPort = nullptr;
+ }
+ return true;
+}
+
+bool
+OutputStreamData::Equals(MediaStream* aStream) const
+{
+ return mStream == aStream;
+}
+
+MediaStreamGraph*
+OutputStreamData::Graph() const
+{
+ return mStream->Graph();
+}
+
+void
+OutputStreamManager::Add(ProcessedMediaStream* aStream, bool aFinishWhenEnded)
+{
+ MOZ_ASSERT(NS_IsMainThread());
+ // All streams must belong to the same graph.
+ MOZ_ASSERT(!Graph() || Graph() == aStream->Graph());
+
+  // Ensure that aStream finishes the moment the input stream does.
+ if (aFinishWhenEnded) {
+ aStream->SetAutofinish(true);
+ }
+
+ OutputStreamData* p = mStreams.AppendElement();
+ p->Init(this, aStream);
+
+ // Connect to the input stream if we have one. Otherwise the output stream
+ // will be connected in Connect().
+ if (mInputStream) {
+ p->Connect(mInputStream);
+ }
+}
+
+void
+OutputStreamManager::Remove(MediaStream* aStream)
+{
+ MOZ_ASSERT(NS_IsMainThread());
+ for (int32_t i = mStreams.Length() - 1; i >= 0; --i) {
+ if (mStreams[i].Equals(aStream)) {
+ mStreams.RemoveElementAt(i);
+ break;
+ }
+ }
+}
+
+void
+OutputStreamManager::Connect(MediaStream* aStream)
+{
+ MOZ_ASSERT(NS_IsMainThread());
+ mInputStream = aStream;
+ for (int32_t i = mStreams.Length() - 1; i >= 0; --i) {
+ if (!mStreams[i].Connect(aStream)) {
+ // Probably the DOMMediaStream was GCed. Clean up.
+ mStreams.RemoveElementAt(i);
+ }
+ }
+}
+
+void
+OutputStreamManager::Disconnect()
+{
+ MOZ_ASSERT(NS_IsMainThread());
+ mInputStream = nullptr;
+ for (int32_t i = mStreams.Length() - 1; i >= 0; --i) {
+ if (!mStreams[i].Disconnect()) {
+ // Probably the DOMMediaStream was GCed. Clean up.
+ mStreams.RemoveElementAt(i);
+ }
+ }
+}
+
+} // namespace mozilla
diff --git a/dom/media/mediasink/OutputStreamManager.h b/dom/media/mediasink/OutputStreamManager.h
new file mode 100644
index 000000000..7f91a60c1
--- /dev/null
+++ b/dom/media/mediasink/OutputStreamManager.h
@@ -0,0 +1,80 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef OutputStreamManager_h
+#define OutputStreamManager_h
+
+#include "mozilla/RefPtr.h"
+#include "nsTArray.h"
+
+namespace mozilla {
+
+class MediaInputPort;
+class MediaStream;
+class MediaStreamGraph;
+class OutputStreamManager;
+class ProcessedMediaStream;
+
+class OutputStreamData {
+public:
+ ~OutputStreamData();
+ void Init(OutputStreamManager* aOwner, ProcessedMediaStream* aStream);
+
+ // Connect mStream to the input stream.
+  // Return false if mStream is already destroyed, otherwise true.
+ bool Connect(MediaStream* aStream);
+ // Disconnect mStream from its input stream.
+  // Return false if mStream is already destroyed, otherwise true.
+ bool Disconnect();
+ // Return true if aStream points to the same object as mStream.
+ // Used by OutputStreamManager to remove an output stream.
+ bool Equals(MediaStream* aStream) const;
+ // Return the graph mStream belongs to.
+ MediaStreamGraph* Graph() const;
+
+private:
+ OutputStreamManager* mOwner;
+ RefPtr<ProcessedMediaStream> mStream;
+ // mPort connects our mStream to an input stream.
+ RefPtr<MediaInputPort> mPort;
+};
+
+class OutputStreamManager {
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(OutputStreamManager);
+
+public:
+ // Add the output stream to the collection.
+ void Add(ProcessedMediaStream* aStream, bool aFinishWhenEnded);
+ // Remove the output stream from the collection.
+ void Remove(MediaStream* aStream);
+  // Return true if the collection is empty.
+ bool IsEmpty() const
+ {
+ MOZ_ASSERT(NS_IsMainThread());
+ return mStreams.IsEmpty();
+ }
+ // Connect all output streams in the collection to the input stream.
+ void Connect(MediaStream* aStream);
+ // Disconnect all output streams from the input stream.
+ void Disconnect();
+ // Return the graph these streams belong to or null if empty.
+ MediaStreamGraph* Graph() const
+ {
+ MOZ_ASSERT(NS_IsMainThread());
+ return !IsEmpty() ? mStreams[0].Graph() : nullptr;
+ }
+
+private:
+ ~OutputStreamManager() {}
+ // Keep the input stream so we can connect the output streams that
+ // are added after Connect().
+ RefPtr<MediaStream> mInputStream;
+ nsTArray<OutputStreamData> mStreams;
+};
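+
+// Illustrative main-thread usage (hypothetical stream variables, not part of
+// this patch):
+//
+//   RefPtr<OutputStreamManager> manager = new OutputStreamManager();
+//   manager->Add(captureStream, /* aFinishWhenEnded = */ true);
+//   manager->Connect(decodedInputStream); // wires all registered outputs
+//   ...
+//   manager->Disconnect(); // e.g. before the input stream is recreated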
+
+} // namespace mozilla
+
+#endif // OutputStreamManager_h
diff --git a/dom/media/mediasink/VideoSink.cpp b/dom/media/mediasink/VideoSink.cpp
new file mode 100644
index 000000000..18c0b22ad
--- /dev/null
+++ b/dom/media/mediasink/VideoSink.cpp
@@ -0,0 +1,486 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "MediaQueue.h"
+#include "VideoSink.h"
+#include "MediaPrefs.h"
+
+namespace mozilla {
+
+extern LazyLogModule gMediaDecoderLog;
+
+#undef FMT
+#undef DUMP_LOG
+
+#define FMT(x, ...) "VideoSink=%p " x, this, ##__VA_ARGS__
+#define VSINK_LOG(x, ...) MOZ_LOG(gMediaDecoderLog, LogLevel::Debug, (FMT(x, ##__VA_ARGS__)))
+#define VSINK_LOG_V(x, ...) MOZ_LOG(gMediaDecoderLog, LogLevel::Verbose, (FMT(x, ##__VA_ARGS__)))
+#define DUMP_LOG(x, ...) NS_DebugBreak(NS_DEBUG_WARNING, nsPrintfCString(FMT(x, ##__VA_ARGS__)).get(), nullptr, nullptr, -1)
+
+using namespace mozilla::layers;
+
+namespace media {
+
+// The minimum interval between updates is 1/120th of a second (~8,333us),
+// i.e. half the duration of a 60-fps frame.
+static const int64_t MIN_UPDATE_INTERVAL_US = 1000000 / (60 * 2);
+
+VideoSink::VideoSink(AbstractThread* aThread,
+ MediaSink* aAudioSink,
+ MediaQueue<MediaData>& aVideoQueue,
+ VideoFrameContainer* aContainer,
+ FrameStatistics& aFrameStats,
+                     uint32_t aVQueueSentToCompositorSize)
+ : mOwnerThread(aThread)
+ , mAudioSink(aAudioSink)
+ , mVideoQueue(aVideoQueue)
+ , mContainer(aContainer)
+ , mProducerID(ImageContainer::AllocateProducerID())
+ , mFrameStats(aFrameStats)
+ , mVideoFrameEndTime(-1)
+ , mHasVideo(false)
+ , mUpdateScheduler(aThread)
+  , mVideoQueueSendToCompositorSize(aVQueueSentToCompositorSize)
+ , mMinVideoQueueSize(MediaPrefs::RuinAvSync() ? 1 : 0)
+{
+ MOZ_ASSERT(mAudioSink, "AudioSink should exist.");
+}
+
+VideoSink::~VideoSink()
+{
+}
+
+const MediaSink::PlaybackParams&
+VideoSink::GetPlaybackParams() const
+{
+ AssertOwnerThread();
+
+ return mAudioSink->GetPlaybackParams();
+}
+
+void
+VideoSink::SetPlaybackParams(const PlaybackParams& aParams)
+{
+ AssertOwnerThread();
+
+ mAudioSink->SetPlaybackParams(aParams);
+}
+
+RefPtr<GenericPromise>
+VideoSink::OnEnded(TrackType aType)
+{
+ AssertOwnerThread();
+ MOZ_ASSERT(mAudioSink->IsStarted(), "Must be called after playback starts.");
+
+ if (aType == TrackInfo::kAudioTrack) {
+ return mAudioSink->OnEnded(aType);
+ } else if (aType == TrackInfo::kVideoTrack) {
+ return mEndPromise;
+ }
+ return nullptr;
+}
+
+int64_t
+VideoSink::GetEndTime(TrackType aType) const
+{
+ AssertOwnerThread();
+ MOZ_ASSERT(mAudioSink->IsStarted(), "Must be called after playback starts.");
+
+ if (aType == TrackInfo::kVideoTrack) {
+ return mVideoFrameEndTime;
+ } else if (aType == TrackInfo::kAudioTrack) {
+ return mAudioSink->GetEndTime(aType);
+ }
+ return -1;
+}
+
+int64_t
+VideoSink::GetPosition(TimeStamp* aTimeStamp) const
+{
+ AssertOwnerThread();
+
+ return mAudioSink->GetPosition(aTimeStamp);
+}
+
+bool
+VideoSink::HasUnplayedFrames(TrackType aType) const
+{
+ AssertOwnerThread();
+  MOZ_ASSERT(aType == TrackInfo::kAudioTrack, "Not implemented for non-audio tracks.");
+
+ return mAudioSink->HasUnplayedFrames(aType);
+}
+
+void
+VideoSink::SetPlaybackRate(double aPlaybackRate)
+{
+ AssertOwnerThread();
+
+ mAudioSink->SetPlaybackRate(aPlaybackRate);
+}
+
+void
+VideoSink::SetVolume(double aVolume)
+{
+ AssertOwnerThread();
+
+ mAudioSink->SetVolume(aVolume);
+}
+
+void
+VideoSink::SetPreservesPitch(bool aPreservesPitch)
+{
+ AssertOwnerThread();
+
+ mAudioSink->SetPreservesPitch(aPreservesPitch);
+}
+
+void
+VideoSink::SetPlaying(bool aPlaying)
+{
+ AssertOwnerThread();
+ VSINK_LOG_V(" playing (%d) -> (%d)", mAudioSink->IsPlaying(), aPlaying);
+
+ if (!aPlaying) {
+ // Reset any update timer if paused.
+ mUpdateScheduler.Reset();
+ // Since playback is paused, tell compositor to render only current frame.
+ RenderVideoFrames(1);
+ if (mContainer) {
+ mContainer->ClearCachedResources();
+ }
+ }
+
+ mAudioSink->SetPlaying(aPlaying);
+
+ if (mHasVideo && aPlaying) {
+    // VideoSink has no thread that pulls video frames, so rendering must be
+    // triggered when we enter the playing state: the VideoQueue may already
+    // be full, in which case no further push event would arrive to kick off
+    // the render loop.
+ TryUpdateRenderedVideoFrames();
+ }
+}
+
+void
+VideoSink::Start(int64_t aStartTime, const MediaInfo& aInfo)
+{
+ AssertOwnerThread();
+ VSINK_LOG("[%s]", __func__);
+
+ mAudioSink->Start(aStartTime, aInfo);
+
+ mHasVideo = aInfo.HasVideo();
+
+ if (mHasVideo) {
+ mEndPromise = mEndPromiseHolder.Ensure(__func__);
+
+ // If the underlying MediaSink has an end promise for the video track (which
+ // happens when mAudioSink refers to a DecodedStream), we must wait for it
+ // to complete before resolving our own end promise. Otherwise, MDSM might
+ // stop playback before DecodedStream plays to the end and cause
+ // test_streams_element_capture.html to time out.
+ RefPtr<GenericPromise> p = mAudioSink->OnEnded(TrackInfo::kVideoTrack);
+ if (p) {
+ RefPtr<VideoSink> self = this;
+ mVideoSinkEndRequest.Begin(p->Then(mOwnerThread, __func__,
+ [self] () {
+ self->mVideoSinkEndRequest.Complete();
+ self->TryUpdateRenderedVideoFrames();
+ // It is possible the video queue size is 0 and we have no frames to
+ // render. However, we need to call MaybeResolveEndPromise() to ensure
+ // mEndPromiseHolder is resolved.
+ self->MaybeResolveEndPromise();
+ }, [self] () {
+ self->mVideoSinkEndRequest.Complete();
+ self->TryUpdateRenderedVideoFrames();
+ self->MaybeResolveEndPromise();
+ }));
+ }
+
+ ConnectListener();
+ // Run the render loop at least once so we can resolve the end promise
+ // when video duration is 0.
+ UpdateRenderedVideoFrames();
+ }
+}
+
+void
+VideoSink::Stop()
+{
+ AssertOwnerThread();
+ MOZ_ASSERT(mAudioSink->IsStarted(), "playback not started.");
+ VSINK_LOG("[%s]", __func__);
+
+ mAudioSink->Stop();
+
+ mUpdateScheduler.Reset();
+ if (mHasVideo) {
+ DisconnectListener();
+ mVideoSinkEndRequest.DisconnectIfExists();
+ mEndPromiseHolder.ResolveIfExists(true, __func__);
+ mEndPromise = nullptr;
+ }
+ mVideoFrameEndTime = -1;
+}
+
+bool
+VideoSink::IsStarted() const
+{
+ AssertOwnerThread();
+
+ return mAudioSink->IsStarted();
+}
+
+bool
+VideoSink::IsPlaying() const
+{
+ AssertOwnerThread();
+
+ return mAudioSink->IsPlaying();
+}
+
+void
+VideoSink::Shutdown()
+{
+ AssertOwnerThread();
+ MOZ_ASSERT(!mAudioSink->IsStarted(), "must be called after playback stops.");
+ VSINK_LOG("[%s]", __func__);
+
+ mAudioSink->Shutdown();
+}
+
+void
+VideoSink::OnVideoQueuePushed(RefPtr<MediaData>&& aSample)
+{
+ AssertOwnerThread();
+  // Called on a VideoQueue push event. VideoSink should try rendering ASAP
+  // if the first frame arrives but the update scheduler has not been
+  // triggered yet.
+ VideoData* v = aSample->As<VideoData>();
+ if (!v->mSentToCompositor) {
+ // Since we push rendered frames back to the queue, we will receive
+ // push events for them. We only need to trigger render loop
+ // when this frame is not rendered yet.
+ TryUpdateRenderedVideoFrames();
+ }
+}
+
+void
+VideoSink::OnVideoQueueFinished()
+{
+ AssertOwnerThread();
+ // Run render loop if the end promise is not resolved yet.
+ if (!mUpdateScheduler.IsScheduled() &&
+ mAudioSink->IsPlaying() &&
+ !mEndPromiseHolder.IsEmpty()) {
+ UpdateRenderedVideoFrames();
+ }
+}
+
+void
+VideoSink::Redraw(const VideoInfo& aInfo)
+{
+ AssertOwnerThread();
+
+ // No video track, nothing to draw.
+ if (!aInfo.IsValid() || !mContainer) {
+ return;
+ }
+
+ if (VideoQueue().GetSize() > 0) {
+ RenderVideoFrames(1);
+ return;
+ }
+
+ // When we reach here, it means there are no frames in this video track.
+ // Draw a blank frame to ensure there is something in the image container
+ // to fire 'loadeddata'.
+ RefPtr<Image> blank =
+ mContainer->GetImageContainer()->CreatePlanarYCbCrImage();
+ mContainer->SetCurrentFrame(aInfo.mDisplay, blank, TimeStamp::Now());
+}
+
+void
+VideoSink::TryUpdateRenderedVideoFrames()
+{
+ AssertOwnerThread();
+ if (!mUpdateScheduler.IsScheduled() && VideoQueue().GetSize() >= 1 &&
+ mAudioSink->IsPlaying()) {
+ UpdateRenderedVideoFrames();
+ }
+}
+
+void
+VideoSink::UpdateRenderedVideoFramesByTimer()
+{
+ AssertOwnerThread();
+ mUpdateScheduler.CompleteRequest();
+ UpdateRenderedVideoFrames();
+}
+
+void
+VideoSink::ConnectListener()
+{
+ AssertOwnerThread();
+ mPushListener = VideoQueue().PushEvent().Connect(
+ mOwnerThread, this, &VideoSink::OnVideoQueuePushed);
+ mFinishListener = VideoQueue().FinishEvent().Connect(
+ mOwnerThread, this, &VideoSink::OnVideoQueueFinished);
+}
+
+void
+VideoSink::DisconnectListener()
+{
+ AssertOwnerThread();
+ mPushListener.Disconnect();
+ mFinishListener.Disconnect();
+}
+
+void
+VideoSink::RenderVideoFrames(int32_t aMaxFrames,
+ int64_t aClockTime,
+ const TimeStamp& aClockTimeStamp)
+{
+ AssertOwnerThread();
+
+ AutoTArray<RefPtr<MediaData>,16> frames;
+ VideoQueue().GetFirstElements(aMaxFrames, &frames);
+ if (frames.IsEmpty() || !mContainer) {
+ return;
+ }
+
+ AutoTArray<ImageContainer::NonOwningImage,16> images;
+ TimeStamp lastFrameTime;
+ MediaSink::PlaybackParams params = mAudioSink->GetPlaybackParams();
+ for (uint32_t i = 0; i < frames.Length(); ++i) {
+ VideoData* frame = frames[i]->As<VideoData>();
+
+ frame->mSentToCompositor = true;
+
+ if (!frame->mImage || !frame->mImage->IsValid() ||
+ !frame->mImage->GetSize().width || !frame->mImage->GetSize().height) {
+ continue;
+ }
+
+ int64_t frameTime = frame->mTime;
+ if (frameTime < 0) {
+      // Frame times before the start time are invalid; drop such frames.
+ continue;
+ }
+
+ TimeStamp t;
+ if (aMaxFrames > 1) {
+ MOZ_ASSERT(!aClockTimeStamp.IsNull());
+ int64_t delta = frame->mTime - aClockTime;
+ t = aClockTimeStamp +
+ TimeDuration::FromMicroseconds(delta / params.mPlaybackRate);
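+      // E.g. with aClockTime at 1,000,000us, a frame at 1,040,000us and a
+      // playback rate of 2.0, delta is 40,000us and the frame is composited
+      // 20,000us after aClockTimeStamp.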
+ if (!lastFrameTime.IsNull() && t <= lastFrameTime) {
+ // Timestamps out of order; drop the new frame. In theory we should
+ // probably replace the previous frame with the new frame if the
+ // timestamps are equal, but this is a corrupt video file already so
+ // never mind.
+ continue;
+ }
+ lastFrameTime = t;
+ }
+
+ ImageContainer::NonOwningImage* img = images.AppendElement();
+ img->mTimeStamp = t;
+ img->mImage = frame->mImage;
+ img->mFrameID = frame->mFrameID;
+ img->mProducerID = mProducerID;
+
+ VSINK_LOG_V("playing video frame %lld (id=%x) (vq-queued=%i)",
+ frame->mTime, frame->mFrameID, VideoQueue().GetSize());
+ }
+ mContainer->SetCurrentFrames(frames[0]->As<VideoData>()->mDisplay, images);
+}
+
+void
+VideoSink::UpdateRenderedVideoFrames()
+{
+ AssertOwnerThread();
+ MOZ_ASSERT(mAudioSink->IsPlaying(), "should be called while playing.");
+
+ // Get the current playback position.
+ TimeStamp nowTime;
+ const int64_t clockTime = mAudioSink->GetPosition(&nowTime);
+ NS_ASSERTION(clockTime >= 0, "Should have positive clock time.");
+
+ // Skip frames up to the playback position.
+ int64_t lastFrameEndTime = 0;
+ while (VideoQueue().GetSize() > mMinVideoQueueSize &&
+ clockTime >= VideoQueue().PeekFront()->GetEndTime()) {
+ RefPtr<MediaData> frame = VideoQueue().PopFront();
+ lastFrameEndTime = frame->GetEndTime();
+ if (frame->As<VideoData>()->mSentToCompositor) {
+ mFrameStats.NotifyPresentedFrame();
+ } else {
+ mFrameStats.NotifyDecodedFrames({ 0, 0, 1 });
+ VSINK_LOG_V("discarding video frame mTime=%lld clock_time=%lld",
+ frame->mTime, clockTime);
+ }
+ }
+
+ // The presentation end time of the last video frame displayed is either
+ // the end time of the current frame, or if we dropped all frames in the
+ // queue, the end time of the last frame we removed from the queue.
+ RefPtr<MediaData> currentFrame = VideoQueue().PeekFront();
+ mVideoFrameEndTime = std::max(mVideoFrameEndTime,
+ currentFrame ? currentFrame->GetEndTime() : lastFrameEndTime);
+
+ MaybeResolveEndPromise();
+
+ RenderVideoFrames(mVideoQueueSendToCompositorSize, clockTime, nowTime);
+
+ // Get the timestamp of the next frame. Schedule the next update at
+ // the start time of the next frame. If we don't have a next frame,
+ // we will run render loops again upon incoming frames.
+ nsTArray<RefPtr<MediaData>> frames;
+ VideoQueue().GetFirstElements(2, &frames);
+ if (frames.Length() < 2) {
+ return;
+ }
+
+ int64_t nextFrameTime = frames[1]->mTime;
+ int64_t delta = std::max<int64_t>((nextFrameTime - clockTime), MIN_UPDATE_INTERVAL_US);
+ TimeStamp target = nowTime + TimeDuration::FromMicroseconds(
+ delta / mAudioSink->GetPlaybackParams().mPlaybackRate);
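+  // E.g. if the next frame starts 50,000us past the clock time, the timer
+  // fires 50,000us from now at 1.0x rate and 25,000us from now at 2.0x; the
+  // clamp above keeps delta at least MIN_UPDATE_INTERVAL_US before scaling.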
+
+ RefPtr<VideoSink> self = this;
+ mUpdateScheduler.Ensure(target, [self] () {
+ self->UpdateRenderedVideoFramesByTimer();
+ }, [self] () {
+ self->UpdateRenderedVideoFramesByTimer();
+ });
+}
+
+void
+VideoSink::MaybeResolveEndPromise()
+{
+ AssertOwnerThread();
+  // All frames have been rendered; resolve the promise.
+ if (VideoQueue().IsFinished() &&
+ VideoQueue().GetSize() <= 1 &&
+ !mVideoSinkEndRequest.Exists()) {
+ mEndPromiseHolder.ResolveIfExists(true, __func__);
+ }
+}
+
+void
+VideoSink::DumpDebugInfo()
+{
+ AssertOwnerThread();
+ DUMP_LOG(
+ "IsStarted=%d IsPlaying=%d, VideoQueue: finished=%d size=%d, "
+ "mVideoFrameEndTime=%lld mHasVideo=%d mVideoSinkEndRequest.Exists()=%d "
+ "mEndPromiseHolder.IsEmpty()=%d",
+ IsStarted(), IsPlaying(), VideoQueue().IsFinished(), VideoQueue().GetSize(),
+ mVideoFrameEndTime, mHasVideo, mVideoSinkEndRequest.Exists(), mEndPromiseHolder.IsEmpty());
+ mAudioSink->DumpDebugInfo();
+}
+
+} // namespace media
+} // namespace mozilla
diff --git a/dom/media/mediasink/VideoSink.h b/dom/media/mediasink/VideoSink.h
new file mode 100644
index 000000000..2612f0e07
--- /dev/null
+++ b/dom/media/mediasink/VideoSink.h
@@ -0,0 +1,160 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef VideoSink_h_
+#define VideoSink_h_
+
+#include "FrameStatistics.h"
+#include "ImageContainer.h"
+#include "MediaEventSource.h"
+#include "MediaSink.h"
+#include "MediaTimer.h"
+#include "mozilla/AbstractThread.h"
+#include "mozilla/MozPromise.h"
+#include "mozilla/RefPtr.h"
+#include "mozilla/TimeStamp.h"
+#include "VideoFrameContainer.h"
+
+namespace mozilla {
+
+class VideoFrameContainer;
+template <class T> class MediaQueue;
+
+namespace media {
+
+class VideoSink : public MediaSink
+{
+ typedef mozilla::layers::ImageContainer::ProducerID ProducerID;
+public:
+ VideoSink(AbstractThread* aThread,
+ MediaSink* aAudioSink,
+ MediaQueue<MediaData>& aVideoQueue,
+ VideoFrameContainer* aContainer,
+ FrameStatistics& aFrameStats,
+            uint32_t aVQueueSentToCompositorSize);
+
+ const PlaybackParams& GetPlaybackParams() const override;
+
+ void SetPlaybackParams(const PlaybackParams& aParams) override;
+
+ RefPtr<GenericPromise> OnEnded(TrackType aType) override;
+
+ int64_t GetEndTime(TrackType aType) const override;
+
+ int64_t GetPosition(TimeStamp* aTimeStamp = nullptr) const override;
+
+ bool HasUnplayedFrames(TrackType aType) const override;
+
+ void SetPlaybackRate(double aPlaybackRate) override;
+
+ void SetVolume(double aVolume) override;
+
+ void SetPreservesPitch(bool aPreservesPitch) override;
+
+ void SetPlaying(bool aPlaying) override;
+
+ void Redraw(const VideoInfo& aInfo) override;
+
+ void Start(int64_t aStartTime, const MediaInfo& aInfo) override;
+
+ void Stop() override;
+
+ bool IsStarted() const override;
+
+ bool IsPlaying() const override;
+
+ void Shutdown() override;
+
+ void DumpDebugInfo() override;
+
+private:
+ virtual ~VideoSink();
+
+ // VideoQueue listener related.
+ void OnVideoQueuePushed(RefPtr<MediaData>&& aSample);
+ void OnVideoQueueFinished();
+ void ConnectListener();
+ void DisconnectListener();
+
+ // Sets VideoQueue images into the VideoFrameContainer. Called on the shared
+ // state machine thread. The first aMaxFrames (at most) are set.
+ // aClockTime and aClockTimeStamp are used as the baseline for deriving
+ // timestamps for the frames; when omitted, aMaxFrames must be 1 and
+ // a null timestamp is passed to the VideoFrameContainer.
+ // If the VideoQueue is empty, this does nothing.
+ void RenderVideoFrames(int32_t aMaxFrames, int64_t aClockTime = 0,
+                         const TimeStamp& aClockTimeStamp = TimeStamp());
+
+  // Triggered when the VideoSink is started, becomes playing, or a
+  // VideoQueue event arrives.
+ void TryUpdateRenderedVideoFrames();
+
+  // If we have video, display a video frame if its display time has
+ // arrived, otherwise sleep until it's time for the next frame. Update the
+ // current frame time as appropriate, and trigger ready state update.
+ // Called on the shared state machine thread.
+ void UpdateRenderedVideoFrames();
+ void UpdateRenderedVideoFramesByTimer();
+
+ void MaybeResolveEndPromise();
+
+ void AssertOwnerThread() const
+ {
+ MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
+ }
+
+ MediaQueue<MediaData>& VideoQueue() const {
+ return mVideoQueue;
+ }
+
+ const RefPtr<AbstractThread> mOwnerThread;
+ RefPtr<MediaSink> mAudioSink;
+ MediaQueue<MediaData>& mVideoQueue;
+ VideoFrameContainer* mContainer;
+
+ // Producer ID to help ImageContainer distinguish different streams of
+ // FrameIDs. A unique and immutable value per VideoSink.
+ const ProducerID mProducerID;
+
+ // Used to notify MediaDecoder's frame statistics
+ FrameStatistics& mFrameStats;
+
+ RefPtr<GenericPromise> mEndPromise;
+ MozPromiseHolder<GenericPromise> mEndPromiseHolder;
+ MozPromiseRequestHolder<GenericPromise> mVideoSinkEndRequest;
+
+ // The presentation end time of the last video frame which has been displayed
+ // in microseconds.
+ int64_t mVideoFrameEndTime;
+
+ // Event listeners for VideoQueue
+ MediaEventListener mPushListener;
+ MediaEventListener mFinishListener;
+
+ // True if this sink is going to handle video track.
+ bool mHasVideo;
+
+ // Used to trigger another update of rendered frames in next round.
+ DelayedScheduler mUpdateScheduler;
+
+ // Max frame number sent to compositor at a time.
+ // Based on the pref value obtained in MDSM.
+ const uint32_t mVideoQueueSendToCompositorSize;
+
+ // Talos tests for the compositor require at least one frame in the
+  // video queue so that the compositor has something to composite during
+ // the talos test when the decode is stressed. We have a minimum size
+ // on the video queue in order to facilitate this talos test.
+ // Note: Normal playback should not have a queue size of more than 0,
+ // otherwise A/V sync will be ruined! *Only* make this non-zero for
+ // testing purposes.
+ const uint32_t mMinVideoQueueSize;
+};
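+
+// VideoSink decorates another MediaSink and renders the video track on top of
+// it (an illustrative wiring sketch with hypothetical names; the queue-size
+// argument is the pref value obtained in MDSM):
+//
+//   RefPtr<MediaSink> sink = new VideoSink(mTaskQueue, audioSink, mVideoQueue,
+//                                          mVideoFrameContainer, mFrameStats,
+//                                          sendToCompositorSize);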
+
+} // namespace media
+} // namespace mozilla
+
+#endif
diff --git a/dom/media/mediasink/moz.build b/dom/media/mediasink/moz.build
new file mode 100644
index 000000000..c09341374
--- /dev/null
+++ b/dom/media/mediasink/moz.build
@@ -0,0 +1,18 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+UNIFIED_SOURCES += [
+ 'AudioSinkWrapper.cpp',
+ 'DecodedAudioDataSink.cpp',
+ 'DecodedStream.cpp',
+ 'OutputStreamManager.cpp',
+ 'VideoSink.cpp',
+]
+
+FINAL_LIBRARY = 'xul'
+
+if CONFIG['GNU_CXX']:
+ CXXFLAGS += ['-Wno-error=shadow']