summaryrefslogtreecommitdiffstats
path: root/dom/media/webrtc
diff options
context:
space:
mode:
authorMatt A. Tobin <mattatobin@localhost.localdomain>2018-02-02 04:16:08 -0500
committerMatt A. Tobin <mattatobin@localhost.localdomain>2018-02-02 04:16:08 -0500
commit5f8de423f190bbb79a62f804151bc24824fa32d8 (patch)
tree10027f336435511475e392454359edea8e25895d /dom/media/webrtc
parent49ee0794b5d912db1f95dce6eb52d781dc210db5 (diff)
downloadUXP-5f8de423f190bbb79a62f804151bc24824fa32d8.tar
UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.tar.gz
UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.tar.lz
UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.tar.xz
UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.zip
Add m-esr52 at 52.6.0
Diffstat (limited to 'dom/media/webrtc')
-rw-r--r--dom/media/webrtc/AudioOutputObserver.h64
-rw-r--r--dom/media/webrtc/MediaEngine.h485
-rw-r--r--dom/media/webrtc/MediaEngineCameraVideoSource.cpp418
-rw-r--r--dom/media/webrtc/MediaEngineCameraVideoSource.h132
-rw-r--r--dom/media/webrtc/MediaEngineDefault.cpp568
-rw-r--r--dom/media/webrtc/MediaEngineDefault.h215
-rw-r--r--dom/media/webrtc/MediaEngineRemoteVideoSource.cpp509
-rw-r--r--dom/media/webrtc/MediaEngineRemoteVideoSource.h136
-rw-r--r--dom/media/webrtc/MediaEngineTabVideoSource.cpp395
-rw-r--r--dom/media/webrtc/MediaEngineTabVideoSource.h114
-rw-r--r--dom/media/webrtc/MediaEngineWebRTC.cpp431
-rw-r--r--dom/media/webrtc/MediaEngineWebRTC.h613
-rw-r--r--dom/media/webrtc/MediaEngineWebRTCAudio.cpp937
-rw-r--r--dom/media/webrtc/MediaTrackConstraints.cpp469
-rw-r--r--dom/media/webrtc/MediaTrackConstraints.h449
-rw-r--r--dom/media/webrtc/PWebrtcGlobal.ipdl33
-rw-r--r--dom/media/webrtc/PeerIdentity.cpp86
-rw-r--r--dom/media/webrtc/PeerIdentity.h81
-rw-r--r--dom/media/webrtc/RTCCertificate.cpp462
-rw-r--r--dom/media/webrtc/RTCCertificate.h98
-rw-r--r--dom/media/webrtc/RTCIdentityProviderRegistrar.cpp90
-rw-r--r--dom/media/webrtc/RTCIdentityProviderRegistrar.h59
-rw-r--r--dom/media/webrtc/WebrtcGlobal.h497
-rw-r--r--dom/media/webrtc/moz.build88
-rw-r--r--dom/media/webrtc/nsITabSource.idl20
25 files changed, 7449 insertions, 0 deletions
diff --git a/dom/media/webrtc/AudioOutputObserver.h b/dom/media/webrtc/AudioOutputObserver.h
new file mode 100644
index 000000000..517eaa891
--- /dev/null
+++ b/dom/media/webrtc/AudioOutputObserver.h
@@ -0,0 +1,64 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef AUDIOOUTPUTOBSERVER_H_
+#define AUDIOOUTPUTOBSERVER_H_
+
+#include "mozilla/StaticPtr.h"
+#include "nsAutoPtr.h"
+#include "AudioMixer.h"
+
+namespace webrtc {
+class SingleRwFifo;
+}
+
+namespace mozilla {
+
+typedef struct FarEndAudioChunk_ {
+ uint16_t mSamples;
+ bool mOverrun;
+ int16_t mData[1]; // variable-length
+} FarEndAudioChunk;
+
+// XXX Really a singleton currently
+class AudioOutputObserver : public MixerCallbackReceiver
+{
+public:
+ AudioOutputObserver();
+
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AudioOutputObserver);
+
+ void MixerCallback(AudioDataValue* aMixedBuffer,
+ AudioSampleFormat aFormat,
+ uint32_t aChannels,
+ uint32_t aFrames,
+ uint32_t aSampleRate) override;
+
+ void Clear();
+ void InsertFarEnd(const AudioDataValue *aBuffer, uint32_t aFrames, bool aOverran,
+ int aFreq, int aChannels, AudioSampleFormat aFormat);
+ uint32_t PlayoutFrequency() { return mPlayoutFreq; }
+ uint32_t PlayoutChannels() { return mPlayoutChannels; }
+
+ FarEndAudioChunk *Pop();
+ uint32_t Size();
+
+private:
+ virtual ~AudioOutputObserver();
+ uint32_t mPlayoutFreq;
+ uint32_t mPlayoutChannels;
+
+ nsAutoPtr<webrtc::SingleRwFifo> mPlayoutFifo;
+ uint32_t mChunkSize;
+
+ // chunking to 10ms support
+ FarEndAudioChunk *mSaved; // can't be nsAutoPtr since we need to use free(), not delete
+ uint32_t mSamplesSaved;
+};
+
+extern StaticRefPtr<AudioOutputObserver> gFarendObserver;
+
+}
+
+#endif
diff --git a/dom/media/webrtc/MediaEngine.h b/dom/media/webrtc/MediaEngine.h
new file mode 100644
index 000000000..ff2a6e25a
--- /dev/null
+++ b/dom/media/webrtc/MediaEngine.h
@@ -0,0 +1,485 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef MEDIAENGINE_H_
+#define MEDIAENGINE_H_
+
+#include "mozilla/RefPtr.h"
+#include "DOMMediaStream.h"
+#include "MediaStreamGraph.h"
+#include "MediaTrackConstraints.h"
+#include "mozilla/dom/MediaStreamTrackBinding.h"
+#include "mozilla/dom/VideoStreamTrack.h"
+#include "mozilla/media/DeviceChangeCallback.h"
+
+namespace mozilla {
+
+namespace dom {
+class Blob;
+} // namespace dom
+
+enum {
+ kVideoTrack = 1,
+ kAudioTrack = 2,
+ kTrackCount
+};
+
+/**
+ * Abstract interface for managing audio and video devices. Each platform
+ * must implement a concrete class that will map these classes and methods
+ * to the appropriate backend. For example, on Desktop platforms, these will
+ * correspond to equivalent webrtc (GIPS) calls, and on B2G they will map to
+ * a Gonk interface.
+ */
+class MediaEngineVideoSource;
+class MediaEngineAudioSource;
+
+enum MediaEngineState {
+ kAllocated,
+ kStarted,
+ kStopped,
+ kReleased
+};
+
+class MediaEngine : public DeviceChangeCallback
+{
+public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaEngine)
+
+ static const int DEFAULT_VIDEO_FPS = 30;
+ static const int DEFAULT_VIDEO_MIN_FPS = 10;
+ static const int DEFAULT_43_VIDEO_WIDTH = 640;
+ static const int DEFAULT_43_VIDEO_HEIGHT = 480;
+ static const int DEFAULT_169_VIDEO_WIDTH = 1280;
+ static const int DEFAULT_169_VIDEO_HEIGHT = 720;
+
+#ifndef MOZ_B2G
+ static const int DEFAULT_SAMPLE_RATE = 32000;
+#else
+ static const int DEFAULT_SAMPLE_RATE = 16000;
+#endif
+ // This allows using whatever rate the graph is using for the
+ // MediaStreamTrack. This is useful for microphone data, we know it's already
+ // at the correct rate for insertion in the MSG.
+ static const int USE_GRAPH_RATE = -1;
+
+ /* Populate an array of video sources in the nsTArray. Also include devices
+ * that are currently unavailable. */
+ virtual void EnumerateVideoDevices(dom::MediaSourceEnum,
+ nsTArray<RefPtr<MediaEngineVideoSource> >*) = 0;
+
+ /* Populate an array of audio sources in the nsTArray. Also include devices
+ * that are currently unavailable. */
+ virtual void EnumerateAudioDevices(dom::MediaSourceEnum,
+ nsTArray<RefPtr<MediaEngineAudioSource> >*) = 0;
+
+ virtual void Shutdown() = 0;
+
+ virtual void SetFakeDeviceChangeEvents() {}
+
+protected:
+ virtual ~MediaEngine() {}
+};
+
+/**
+ * Video source and friends.
+ */
+class MediaEnginePrefs {
+public:
+ MediaEnginePrefs()
+ : mWidth(0)
+ , mHeight(0)
+ , mFPS(0)
+ , mMinFPS(0)
+ , mFreq(0)
+ , mAecOn(false)
+ , mAgcOn(false)
+ , mNoiseOn(false)
+ , mAec(0)
+ , mAgc(0)
+ , mNoise(0)
+ , mPlayoutDelay(0)
+ , mFullDuplex(false)
+ , mExtendedFilter(false)
+ , mDelayAgnostic(false)
+ , mFakeDeviceChangeEventOn(false)
+ {}
+
+ int32_t mWidth;
+ int32_t mHeight;
+ int32_t mFPS;
+ int32_t mMinFPS;
+ int32_t mFreq; // for test tones (fake:true)
+ bool mAecOn;
+ bool mAgcOn;
+ bool mNoiseOn;
+ int32_t mAec;
+ int32_t mAgc;
+ int32_t mNoise;
+ int32_t mPlayoutDelay;
+ bool mFullDuplex;
+ bool mExtendedFilter;
+ bool mDelayAgnostic;
+ bool mFakeDeviceChangeEventOn;
+
+ // mWidth and/or mHeight may be zero (=adaptive default), so use functions.
+
+ int32_t GetWidth(bool aHD = false) const {
+ return mWidth? mWidth : (mHeight?
+ (mHeight * GetDefWidth(aHD)) / GetDefHeight(aHD) :
+ GetDefWidth(aHD));
+ }
+
+ int32_t GetHeight(bool aHD = false) const {
+ return mHeight? mHeight : (mWidth?
+ (mWidth * GetDefHeight(aHD)) / GetDefWidth(aHD) :
+ GetDefHeight(aHD));
+ }
+private:
+ static int32_t GetDefWidth(bool aHD = false) {
+ // It'd be nice if we could use the ternary operator here, but we can't
+ // because of bug 1002729.
+ if (aHD) {
+ return MediaEngine::DEFAULT_169_VIDEO_WIDTH;
+ }
+
+ return MediaEngine::DEFAULT_43_VIDEO_WIDTH;
+ }
+
+ static int32_t GetDefHeight(bool aHD = false) {
+ // It'd be nice if we could use the ternary operator here, but we can't
+ // because of bug 1002729.
+ if (aHD) {
+ return MediaEngine::DEFAULT_169_VIDEO_HEIGHT;
+ }
+
+ return MediaEngine::DEFAULT_43_VIDEO_HEIGHT;
+ }
+};
+
+/**
+ * Callback interface for TakePhoto(). Either PhotoComplete() or PhotoError()
+ * should be called.
+ */
+class MediaEnginePhotoCallback {
+public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaEnginePhotoCallback)
+
+ // aBlob is the image captured by MediaEngineSource. It is
+ // called on main thread.
+ virtual nsresult PhotoComplete(already_AddRefed<dom::Blob> aBlob) = 0;
+
+ // It is called on main thread. aRv is the error code.
+ virtual nsresult PhotoError(nsresult aRv) = 0;
+
+protected:
+ virtual ~MediaEnginePhotoCallback() {}
+};
+
+/**
+ * Common abstract base class for audio and video sources.
+ *
+ * By default, the base class implements Allocate and Deallocate using its
+ * UpdateSingleSource pattern, which manages allocation handles and calculates
+ * net constraints from competing allocations and updates a single shared device.
+ *
+ * Classes that don't operate as a single shared device can override Allocate
+ * and Deallocate and simply not pass the methods up.
+ */
+class MediaEngineSource : public nsISupports,
+ protected MediaConstraintsHelper
+{
+public:
+ // code inside webrtc.org assumes these sizes; don't use anything smaller
+ // without verifying it's ok
+ static const unsigned int kMaxDeviceNameLength = 128;
+ static const unsigned int kMaxUniqueIdLength = 256;
+
+ virtual ~MediaEngineSource()
+ {
+ if (!mInShutdown) {
+ Shutdown();
+ }
+ }
+
+ virtual void Shutdown()
+ {
+ mInShutdown = true;
+ };
+
+ /* Populate the human readable name of this device in the nsAString */
+ virtual void GetName(nsAString&) const = 0;
+
+ /* Populate the UUID of this device in the nsACString */
+ virtual void GetUUID(nsACString&) const = 0;
+
+ /* Override w/true if source does end-run around cross origin restrictions. */
+ virtual bool GetScary() const { return false; };
+
+ class AllocationHandle
+ {
+ public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AllocationHandle);
+ protected:
+ ~AllocationHandle() {}
+ public:
+ AllocationHandle(const dom::MediaTrackConstraints& aConstraints,
+ const nsACString& aOrigin,
+ const MediaEnginePrefs& aPrefs,
+ const nsString& aDeviceId)
+ : mConstraints(aConstraints),
+ mOrigin(aOrigin),
+ mPrefs(aPrefs),
+ mDeviceId(aDeviceId) {}
+ public:
+ NormalizedConstraints mConstraints;
+ nsCString mOrigin;
+ MediaEnginePrefs mPrefs;
+ nsString mDeviceId;
+ };
+
+ /* Release the device back to the system. */
+ virtual nsresult Deallocate(AllocationHandle* aHandle)
+ {
+ MOZ_ASSERT(aHandle);
+ RefPtr<AllocationHandle> handle = aHandle;
+
+ class Comparator {
+ public:
+ static bool Equals(const RefPtr<AllocationHandle>& a,
+ const RefPtr<AllocationHandle>& b) {
+ return a.get() == b.get();
+ }
+ };
+
+ auto ix = mRegisteredHandles.IndexOf(handle, 0, Comparator());
+ if (ix == mRegisteredHandles.NoIndex) {
+ MOZ_ASSERT(false);
+ return NS_ERROR_FAILURE;
+ }
+
+ mRegisteredHandles.RemoveElementAt(ix);
+ if (mRegisteredHandles.Length() && !mInShutdown) {
+ // Whenever constraints are removed, other parties may get closer to ideal.
+ auto& first = mRegisteredHandles[0];
+ const char* badConstraint = nullptr;
+ return ReevaluateAllocation(nullptr, nullptr, first->mPrefs,
+ first->mDeviceId, &badConstraint);
+ }
+ return NS_OK;
+ }
+
+ /* Start the device and add the track to the provided SourceMediaStream, with
+ * the provided TrackID. You may start appending data to the track
+ * immediately after. */
+ virtual nsresult Start(SourceMediaStream*, TrackID, const PrincipalHandle&) = 0;
+
+ /* tell the source if there are any direct listeners attached */
+ virtual void SetDirectListeners(bool) = 0;
+
+ /* Called when the stream wants more data */
+ virtual void NotifyPull(MediaStreamGraph* aGraph,
+ SourceMediaStream *aSource,
+ TrackID aId,
+ StreamTime aDesiredTime,
+ const PrincipalHandle& aPrincipalHandle) = 0;
+
+ /* Stop the device and release the corresponding MediaStream */
+ virtual nsresult Stop(SourceMediaStream *aSource, TrackID aID) = 0;
+
+ /* Restart with new capability */
+ virtual nsresult Restart(AllocationHandle* aHandle,
+ const dom::MediaTrackConstraints& aConstraints,
+ const MediaEnginePrefs &aPrefs,
+ const nsString& aDeviceId,
+ const char** aOutBadConstraint) = 0;
+
+ /* Returns true if a source represents a fake capture device and
+ * false otherwise
+ */
+ virtual bool IsFake() = 0;
+
+ /* Returns the type of media source (camera, microphone, screen, window, etc) */
+ virtual dom::MediaSourceEnum GetMediaSource() const = 0;
+
+ /* If implementation of MediaEngineSource supports TakePhoto(), the picture
+ * should be return via aCallback object. Otherwise, it returns NS_ERROR_NOT_IMPLEMENTED.
+ * Currently, only Gonk MediaEngineSource implementation supports it.
+ */
+ virtual nsresult TakePhoto(MediaEnginePhotoCallback* aCallback) = 0;
+
+ /* Return false if device is currently allocated or started */
+ bool IsAvailable() {
+ if (mState == kAllocated || mState == kStarted) {
+ return false;
+ } else {
+ return true;
+ }
+ }
+
+ /* It is an error to call Start() before an Allocate(), and Stop() before
+ * a Start(). Only Allocate() may be called after a Deallocate(). */
+
+ /* This call reserves but does not start the device. */
+ virtual nsresult Allocate(const dom::MediaTrackConstraints &aConstraints,
+ const MediaEnginePrefs &aPrefs,
+ const nsString& aDeviceId,
+ const nsACString& aOrigin,
+ AllocationHandle** aOutHandle,
+ const char** aOutBadConstraint)
+ {
+ AssertIsOnOwningThread();
+ MOZ_ASSERT(aOutHandle);
+ RefPtr<AllocationHandle> handle = new AllocationHandle(aConstraints, aOrigin,
+ aPrefs, aDeviceId);
+ nsresult rv = ReevaluateAllocation(handle, nullptr, aPrefs, aDeviceId,
+ aOutBadConstraint);
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+ mRegisteredHandles.AppendElement(handle);
+ handle.forget(aOutHandle);
+ return NS_OK;
+ }
+
+ virtual uint32_t GetBestFitnessDistance(
+ const nsTArray<const NormalizedConstraintSet*>& aConstraintSets,
+ const nsString& aDeviceId) const = 0;
+
+ void GetSettings(dom::MediaTrackSettings& aOutSettings)
+ {
+ MOZ_ASSERT(NS_IsMainThread());
+ aOutSettings = mSettings;
+ }
+
+protected:
+ // Only class' own members can be initialized in constructor initializer list.
+ explicit MediaEngineSource(MediaEngineState aState)
+ : mState(aState)
+#ifdef DEBUG
+ , mOwningThread(PR_GetCurrentThread())
+#endif
+ , mInShutdown(false)
+ {}
+
+ /* UpdateSingleSource - Centralized abstract function to implement in those
+ * cases where a single device is being shared between users. Should apply net
+ * constraints and restart the device as needed.
+ *
+ * aHandle - New or existing handle, or null to update after removal.
+ * aNetConstraints - Net constraints to be applied to the single device.
+ * aPrefs - As passed in (in case of changes in about:config).
+ * aDeviceId - As passed in (origin dependent).
+ * aOutBadConstraint - Result: nonzero if failed to apply. Name of culprit.
+ */
+
+ virtual nsresult
+ UpdateSingleSource(const AllocationHandle* aHandle,
+ const NormalizedConstraints& aNetConstraints,
+ const MediaEnginePrefs& aPrefs,
+ const nsString& aDeviceId,
+ const char** aOutBadConstraint) {
+ return NS_ERROR_NOT_IMPLEMENTED;
+ };
+
+ /* ReevaluateAllocation - Call to change constraints for an allocation of
+ * a single device. Manages allocation handles, calculates net constraints
+ * from all competing allocations, and calls UpdateSingleSource with the net
+ * result, to restart the single device as needed.
+ *
+ * aHandle - New or existing handle, or null to update after removal.
+ * aConstraintsUpdate - Constraints to be applied to existing handle, or null.
+ * aPrefs - As passed in (in case of changes from about:config).
+ * aDeviceId - As passed in (origin-dependent id).
+ * aOutBadConstraint - Result: nonzero if failed to apply. Name of culprit.
+ */
+
+ nsresult
+ ReevaluateAllocation(AllocationHandle* aHandle,
+ NormalizedConstraints* aConstraintsUpdate,
+ const MediaEnginePrefs& aPrefs,
+ const nsString& aDeviceId,
+ const char** aOutBadConstraint)
+ {
+ // aHandle and/or aConstraintsUpdate may be nullptr (see below)
+
+ AutoTArray<const NormalizedConstraints*, 10> allConstraints;
+ for (auto& registered : mRegisteredHandles) {
+ if (aConstraintsUpdate && registered.get() == aHandle) {
+ continue; // Don't count old constraints
+ }
+ allConstraints.AppendElement(&registered->mConstraints);
+ }
+ if (aConstraintsUpdate) {
+ allConstraints.AppendElement(aConstraintsUpdate);
+ } else if (aHandle) {
+ // In the case of AddShareOfSingleSource, the handle isn't registered yet.
+ allConstraints.AppendElement(&aHandle->mConstraints);
+ }
+
+ NormalizedConstraints netConstraints(allConstraints);
+ if (netConstraints.mBadConstraint) {
+ *aOutBadConstraint = netConstraints.mBadConstraint;
+ return NS_ERROR_FAILURE;
+ }
+
+ nsresult rv = UpdateSingleSource(aHandle, netConstraints, aPrefs, aDeviceId,
+ aOutBadConstraint);
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+ if (aHandle && aConstraintsUpdate) {
+ aHandle->mConstraints = *aConstraintsUpdate;
+ }
+ return NS_OK;
+ }
+
+ void AssertIsOnOwningThread()
+ {
+ MOZ_ASSERT(PR_GetCurrentThread() == mOwningThread);
+ }
+
+ MediaEngineState mState;
+#ifdef DEBUG
+ PRThread* mOwningThread;
+#endif
+ nsTArray<RefPtr<AllocationHandle>> mRegisteredHandles;
+ bool mInShutdown;
+
+ // Main-thread only:
+ dom::MediaTrackSettings mSettings;
+};
+
+class MediaEngineVideoSource : public MediaEngineSource
+{
+public:
+ virtual ~MediaEngineVideoSource() {}
+
+protected:
+ explicit MediaEngineVideoSource(MediaEngineState aState)
+ : MediaEngineSource(aState) {}
+ MediaEngineVideoSource()
+ : MediaEngineSource(kReleased) {}
+};
+
+/**
+ * Audio source and friends.
+ */
+class MediaEngineAudioSource : public MediaEngineSource,
+ public AudioDataListenerInterface
+{
+public:
+ virtual ~MediaEngineAudioSource() {}
+
+protected:
+ explicit MediaEngineAudioSource(MediaEngineState aState)
+ : MediaEngineSource(aState) {}
+ MediaEngineAudioSource()
+ : MediaEngineSource(kReleased) {}
+
+};
+
+} // namespace mozilla
+
+#endif /* MEDIAENGINE_H_ */
diff --git a/dom/media/webrtc/MediaEngineCameraVideoSource.cpp b/dom/media/webrtc/MediaEngineCameraVideoSource.cpp
new file mode 100644
index 000000000..a0f31d937
--- /dev/null
+++ b/dom/media/webrtc/MediaEngineCameraVideoSource.cpp
@@ -0,0 +1,418 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "MediaEngineCameraVideoSource.h"
+
+#include <limits>
+
+namespace mozilla {
+
+using namespace mozilla::gfx;
+using namespace mozilla::dom;
+
+extern LogModule* GetMediaManagerLog();
+#define LOG(msg) MOZ_LOG(GetMediaManagerLog(), mozilla::LogLevel::Debug, msg)
+#define LOGFRAME(msg) MOZ_LOG(GetMediaManagerLog(), mozilla::LogLevel::Verbose, msg)
+
+// guts for appending data to the MSG track
+bool MediaEngineCameraVideoSource::AppendToTrack(SourceMediaStream* aSource,
+ layers::Image* aImage,
+ TrackID aID,
+ StreamTime delta,
+ const PrincipalHandle& aPrincipalHandle)
+{
+ MOZ_ASSERT(aSource);
+
+ VideoSegment segment;
+ RefPtr<layers::Image> image = aImage;
+ IntSize size(image ? mWidth : 0, image ? mHeight : 0);
+ segment.AppendFrame(image.forget(), delta, size, aPrincipalHandle);
+
+ // This is safe from any thread, and is safe if the track is Finished
+ // or Destroyed.
+ // This can fail if either a) we haven't added the track yet, or b)
+ // we've removed or finished the track.
+ return aSource->AppendToTrack(aID, &(segment));
+}
+
+// Sub-classes (B2G or desktop) should overload one of both of these two methods
+// to provide capabilities
+size_t
+MediaEngineCameraVideoSource::NumCapabilities() const
+{
+ return mHardcodedCapabilities.Length();
+}
+
+void
+MediaEngineCameraVideoSource::GetCapability(size_t aIndex,
+ webrtc::CaptureCapability& aOut) const
+{
+ MOZ_ASSERT(aIndex < mHardcodedCapabilities.Length());
+ aOut = mHardcodedCapabilities.SafeElementAt(aIndex, webrtc::CaptureCapability());
+}
+
+uint32_t
+MediaEngineCameraVideoSource::GetFitnessDistance(
+ const webrtc::CaptureCapability& aCandidate,
+ const NormalizedConstraintSet &aConstraints,
+ const nsString& aDeviceId) const
+{
+ // Treat width|height|frameRate == 0 on capability as "can do any".
+ // This allows for orthogonal capabilities that are not in discrete steps.
+
+ uint64_t distance =
+ uint64_t(FitnessDistance(aDeviceId, aConstraints.mDeviceId)) +
+ uint64_t(FitnessDistance(mFacingMode, aConstraints.mFacingMode)) +
+ uint64_t(aCandidate.width? FitnessDistance(int32_t(aCandidate.width),
+ aConstraints.mWidth) : 0) +
+ uint64_t(aCandidate.height? FitnessDistance(int32_t(aCandidate.height),
+ aConstraints.mHeight) : 0) +
+ uint64_t(aCandidate.maxFPS? FitnessDistance(double(aCandidate.maxFPS),
+ aConstraints.mFrameRate) : 0);
+ return uint32_t(std::min(distance, uint64_t(UINT32_MAX)));
+}
+
+// Find best capability by removing inferiors. May leave >1 of equal distance
+
+/* static */ void
+MediaEngineCameraVideoSource::TrimLessFitCandidates(CapabilitySet& set) {
+ uint32_t best = UINT32_MAX;
+ for (auto& candidate : set) {
+ if (best > candidate.mDistance) {
+ best = candidate.mDistance;
+ }
+ }
+ for (size_t i = 0; i < set.Length();) {
+ if (set[i].mDistance > best) {
+ set.RemoveElementAt(i);
+ } else {
+ ++i;
+ }
+ }
+ MOZ_ASSERT(set.Length());
+}
+
+// GetBestFitnessDistance returns the best distance the capture device can offer
+// as a whole, given an accumulated number of ConstraintSets.
+// Ideal values are considered in the first ConstraintSet only.
+// Plain values are treated as Ideal in the first ConstraintSet.
+// Plain values are treated as Exact in subsequent ConstraintSets.
+// Infinity = UINT32_MAX e.g. device cannot satisfy accumulated ConstraintSets.
+// A finite result may be used to calculate this device's ranking as a choice.
+
+uint32_t
+MediaEngineCameraVideoSource::GetBestFitnessDistance(
+ const nsTArray<const NormalizedConstraintSet*>& aConstraintSets,
+ const nsString& aDeviceId) const
+{
+ size_t num = NumCapabilities();
+
+ CapabilitySet candidateSet;
+ for (size_t i = 0; i < num; i++) {
+ candidateSet.AppendElement(i);
+ }
+
+ bool first = true;
+ for (const NormalizedConstraintSet* ns : aConstraintSets) {
+ for (size_t i = 0; i < candidateSet.Length(); ) {
+ auto& candidate = candidateSet[i];
+ webrtc::CaptureCapability cap;
+ GetCapability(candidate.mIndex, cap);
+ uint32_t distance = GetFitnessDistance(cap, *ns, aDeviceId);
+ if (distance == UINT32_MAX) {
+ candidateSet.RemoveElementAt(i);
+ } else {
+ ++i;
+ if (first) {
+ candidate.mDistance = distance;
+ }
+ }
+ }
+ first = false;
+ }
+ if (!candidateSet.Length()) {
+ return UINT32_MAX;
+ }
+ TrimLessFitCandidates(candidateSet);
+ return candidateSet[0].mDistance;
+}
+
+void
+MediaEngineCameraVideoSource::LogConstraints(
+ const NormalizedConstraintSet& aConstraints)
+{
+ auto& c = aConstraints;
+ LOG(((c.mWidth.mIdeal.isSome()?
+ "Constraints: width: { min: %d, max: %d, ideal: %d }" :
+ "Constraints: width: { min: %d, max: %d }"),
+ c.mWidth.mMin, c.mWidth.mMax,
+ c.mWidth.mIdeal.valueOr(0)));
+ LOG(((c.mHeight.mIdeal.isSome()?
+ " height: { min: %d, max: %d, ideal: %d }" :
+ " height: { min: %d, max: %d }"),
+ c.mHeight.mMin, c.mHeight.mMax,
+ c.mHeight.mIdeal.valueOr(0)));
+ LOG(((c.mFrameRate.mIdeal.isSome()?
+ " frameRate: { min: %f, max: %f, ideal: %f }" :
+ " frameRate: { min: %f, max: %f }"),
+ c.mFrameRate.mMin, c.mFrameRate.mMax,
+ c.mFrameRate.mIdeal.valueOr(0)));
+}
+
+void
+MediaEngineCameraVideoSource::LogCapability(const char* aHeader,
+ const webrtc::CaptureCapability &aCapability, uint32_t aDistance)
+{
+ // RawVideoType and VideoCodecType media/webrtc/trunk/webrtc/common_types.h
+ static const char* const types[] = {
+ "I420",
+ "YV12",
+ "YUY2",
+ "UYVY",
+ "IYUV",
+ "ARGB",
+ "RGB24",
+ "RGB565",
+ "ARGB4444",
+ "ARGB1555",
+ "MJPEG",
+ "NV12",
+ "NV21",
+ "BGRA",
+ "Unknown type"
+ };
+
+ static const char* const codec[] = {
+ "VP8",
+ "VP9",
+ "H264",
+ "I420",
+ "RED",
+ "ULPFEC",
+ "Generic codec",
+ "Unknown codec"
+ };
+
+ LOG(("%s: %4u x %4u x %2u maxFps, %s, %s. Distance = %lu",
+ aHeader, aCapability.width, aCapability.height, aCapability.maxFPS,
+ types[std::min(std::max(uint32_t(0), uint32_t(aCapability.rawType)),
+ uint32_t(sizeof(types) / sizeof(*types) - 1))],
+ codec[std::min(std::max(uint32_t(0), uint32_t(aCapability.codecType)),
+ uint32_t(sizeof(codec) / sizeof(*codec) - 1))],
+ aDistance));
+}
+
+bool
+MediaEngineCameraVideoSource::ChooseCapability(
+ const NormalizedConstraints &aConstraints,
+ const MediaEnginePrefs &aPrefs,
+ const nsString& aDeviceId)
+{
+ if (MOZ_LOG_TEST(GetMediaManagerLog(), LogLevel::Debug)) {
+ LOG(("ChooseCapability: prefs: %dx%d @%d-%dfps",
+ aPrefs.GetWidth(), aPrefs.GetHeight(),
+ aPrefs.mFPS, aPrefs.mMinFPS));
+ LogConstraints(aConstraints);
+ if (aConstraints.mAdvanced.size()) {
+ LOG(("Advanced array[%u]:", aConstraints.mAdvanced.size()));
+ for (auto& advanced : aConstraints.mAdvanced) {
+ LogConstraints(advanced);
+ }
+ }
+ }
+
+ size_t num = NumCapabilities();
+
+ CapabilitySet candidateSet;
+ for (size_t i = 0; i < num; i++) {
+ candidateSet.AppendElement(i);
+ }
+
+ // First, filter capabilities by required constraints (min, max, exact).
+
+ for (size_t i = 0; i < candidateSet.Length();) {
+ auto& candidate = candidateSet[i];
+ webrtc::CaptureCapability cap;
+ GetCapability(candidate.mIndex, cap);
+ candidate.mDistance = GetFitnessDistance(cap, aConstraints, aDeviceId);
+ LogCapability("Capability", cap, candidate.mDistance);
+ if (candidate.mDistance == UINT32_MAX) {
+ candidateSet.RemoveElementAt(i);
+ } else {
+ ++i;
+ }
+ }
+
+ if (!candidateSet.Length()) {
+ LOG(("failed to find capability match from %d choices",num));
+ return false;
+ }
+
+ // Filter further with all advanced constraints (that don't overconstrain).
+
+ for (const auto &cs : aConstraints.mAdvanced) {
+ CapabilitySet rejects;
+ for (size_t i = 0; i < candidateSet.Length();) {
+ auto& candidate = candidateSet[i];
+ webrtc::CaptureCapability cap;
+ GetCapability(candidate.mIndex, cap);
+ if (GetFitnessDistance(cap, cs, aDeviceId) == UINT32_MAX) {
+ rejects.AppendElement(candidate);
+ candidateSet.RemoveElementAt(i);
+ } else {
+ ++i;
+ }
+ }
+ if (!candidateSet.Length()) {
+ candidateSet.AppendElements(Move(rejects));
+ }
+ }
+ MOZ_ASSERT(candidateSet.Length(),
+ "advanced constraints filtering step can't reduce candidates to zero");
+
+ // Remaining algorithm is up to the UA.
+
+ TrimLessFitCandidates(candidateSet);
+
+ // Any remaining multiples all have the same distance. A common case of this
+ // occurs when no ideal is specified. Lean toward defaults.
+ uint32_t sameDistance = candidateSet[0].mDistance;
+ {
+ MediaTrackConstraintSet prefs;
+ prefs.mWidth.SetAsLong() = aPrefs.GetWidth();
+ prefs.mHeight.SetAsLong() = aPrefs.GetHeight();
+ prefs.mFrameRate.SetAsDouble() = aPrefs.mFPS;
+ NormalizedConstraintSet normPrefs(prefs, false);
+
+ for (auto& candidate : candidateSet) {
+ webrtc::CaptureCapability cap;
+ GetCapability(candidate.mIndex, cap);
+ candidate.mDistance = GetFitnessDistance(cap, normPrefs, aDeviceId);
+ }
+ TrimLessFitCandidates(candidateSet);
+ }
+
+ // Any remaining multiples all have the same distance, but may vary on
+ // format. Some formats are more desirable for certain use like WebRTC.
+ // E.g. I420 over RGB24 can remove a needless format conversion.
+
+ bool found = false;
+ for (auto& candidate : candidateSet) {
+ webrtc::CaptureCapability cap;
+ GetCapability(candidate.mIndex, cap);
+ if (cap.rawType == webrtc::RawVideoType::kVideoI420 ||
+ cap.rawType == webrtc::RawVideoType::kVideoYUY2 ||
+ cap.rawType == webrtc::RawVideoType::kVideoYV12) {
+ mCapability = cap;
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ GetCapability(candidateSet[0].mIndex, mCapability);
+ }
+
+ LogCapability("Chosen capability", mCapability, sameDistance);
+ return true;
+}
+
+void
+MediaEngineCameraVideoSource::SetName(nsString aName)
+{
+ mDeviceName = aName;
+ bool hasFacingMode = false;
+ VideoFacingModeEnum facingMode = VideoFacingModeEnum::User;
+
+ // Set facing mode based on device name.
+#if defined(ANDROID) && !defined(MOZ_WIDGET_GONK)
+ // Names are generated. Example: "Camera 0, Facing back, Orientation 90"
+ //
+ // See media/webrtc/trunk/webrtc/modules/video_capture/android/java/src/org/
+ // webrtc/videoengine/VideoCaptureDeviceInfoAndroid.java
+
+ if (aName.Find(NS_LITERAL_STRING("Facing back")) != kNotFound) {
+ hasFacingMode = true;
+ facingMode = VideoFacingModeEnum::Environment;
+ } else if (aName.Find(NS_LITERAL_STRING("Facing front")) != kNotFound) {
+ hasFacingMode = true;
+ facingMode = VideoFacingModeEnum::User;
+ }
+#endif // ANDROID
+#ifdef XP_MACOSX
+ // Kludge to test user-facing cameras on OSX.
+ if (aName.Find(NS_LITERAL_STRING("Face")) != -1) {
+ hasFacingMode = true;
+ facingMode = VideoFacingModeEnum::User;
+ }
+#endif
+#ifdef XP_WIN
+ // The cameras' name of Surface book are "Microsoft Camera Front" and
+ // "Microsoft Camera Rear" respectively.
+
+ if (aName.Find(NS_LITERAL_STRING("Front")) != kNotFound) {
+ hasFacingMode = true;
+ facingMode = VideoFacingModeEnum::User;
+ } else if (aName.Find(NS_LITERAL_STRING("Rear")) != kNotFound) {
+ hasFacingMode = true;
+ facingMode = VideoFacingModeEnum::Environment;
+ }
+#endif // WINDOWS
+ if (hasFacingMode) {
+ mFacingMode.Assign(NS_ConvertUTF8toUTF16(
+ VideoFacingModeEnumValues::strings[uint32_t(facingMode)].value));
+ } else {
+ mFacingMode.Truncate();
+ }
+}
+
+void
+MediaEngineCameraVideoSource::GetName(nsAString& aName) const
+{
+ aName = mDeviceName;
+}
+
+void
+MediaEngineCameraVideoSource::SetUUID(const char* aUUID)
+{
+ mUniqueId.Assign(aUUID);
+}
+
+void
+MediaEngineCameraVideoSource::GetUUID(nsACString& aUUID) const
+{
+ aUUID = mUniqueId;
+}
+
+const nsCString&
+MediaEngineCameraVideoSource::GetUUID() const
+{
+ return mUniqueId;
+}
+
+void
+MediaEngineCameraVideoSource::SetDirectListeners(bool aHasDirectListeners)
+{
+ LOG((__FUNCTION__));
+ mHasDirectListeners = aHasDirectListeners;
+}
+
+bool operator == (const webrtc::CaptureCapability& a,
+ const webrtc::CaptureCapability& b)
+{
+ return a.width == b.width &&
+ a.height == b.height &&
+ a.maxFPS == b.maxFPS &&
+ a.rawType == b.rawType &&
+ a.codecType == b.codecType &&
+ a.expectedCaptureDelay == b.expectedCaptureDelay &&
+ a.interlaced == b.interlaced;
+};
+
+bool operator != (const webrtc::CaptureCapability& a,
+ const webrtc::CaptureCapability& b)
+{
+ return !(a == b);
+}
+
+} // namespace mozilla
diff --git a/dom/media/webrtc/MediaEngineCameraVideoSource.h b/dom/media/webrtc/MediaEngineCameraVideoSource.h
new file mode 100644
index 000000000..fb9113cd6
--- /dev/null
+++ b/dom/media/webrtc/MediaEngineCameraVideoSource.h
@@ -0,0 +1,132 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef MediaEngineCameraVideoSource_h
+#define MediaEngineCameraVideoSource_h
+
+#include "MediaEngine.h"
+
+#include "nsDirectoryServiceDefs.h"
+
+// conflicts with #include of scoped_ptr.h
+#undef FF
+#include "webrtc/video_engine/include/vie_capture.h"
+
+namespace mozilla {
+
+bool operator == (const webrtc::CaptureCapability& a,
+ const webrtc::CaptureCapability& b);
+bool operator != (const webrtc::CaptureCapability& a,
+ const webrtc::CaptureCapability& b);
+
+// Shared base class for camera-backed video sources. Owns the device
+// name/id, the frame monitor, and the constraint-fitting machinery used
+// to pick a webrtc::CaptureCapability.
+class MediaEngineCameraVideoSource : public MediaEngineVideoSource
+{
+public:
+  // Some subclasses use an index to track multiple instances.
+  explicit MediaEngineCameraVideoSource(int aIndex,
+                                        const char* aMonitorName = "Camera.Monitor")
+    : MediaEngineVideoSource(kReleased)
+    , mMonitor(aMonitorName)
+    , mWidth(0)
+    , mHeight(0)
+    , mInitDone(false)
+    , mHasDirectListeners(false)
+    , mCaptureIndex(aIndex)
+    , mTrackID(0)
+  {}
+
+  explicit MediaEngineCameraVideoSource(const char* aMonitorName = "Camera.Monitor")
+    : MediaEngineCameraVideoSource(0, aMonitorName) {}
+
+  void GetName(nsAString& aName) const override;
+  void GetUUID(nsACString& aUUID) const override;
+  void SetDirectListeners(bool aHasListeners) override;
+
+  bool IsFake() override
+  {
+    return false;
+  }
+
+  nsresult TakePhoto(MediaEnginePhotoCallback* aCallback) override
+  {
+    return NS_ERROR_NOT_IMPLEMENTED;
+  }
+
+  uint32_t GetBestFitnessDistance(
+      const nsTArray<const NormalizedConstraintSet*>& aConstraintSets,
+      const nsString& aDeviceId) const override;
+
+  void Shutdown() override {};
+
+protected:
+  struct CapabilityCandidate {
+    // Fix: take the index as size_t, matching mIndex and the return type
+    // of NumCapabilities(); the previous uint8_t parameter silently
+    // truncated indexes above 255.
+    explicit CapabilityCandidate(size_t index, uint32_t distance = 0)
+    : mIndex(index), mDistance(distance) {}
+
+    size_t mIndex;
+    uint32_t mDistance;
+  };
+  typedef nsTArray<CapabilityCandidate> CapabilitySet;
+
+  ~MediaEngineCameraVideoSource() {}
+
+  // guts for appending data to the MSG track
+  virtual bool AppendToTrack(SourceMediaStream* aSource,
+                             layers::Image* aImage,
+                             TrackID aID,
+                             StreamTime delta,
+                             const PrincipalHandle& aPrincipalHandle);
+  uint32_t GetFitnessDistance(const webrtc::CaptureCapability& aCandidate,
+                              const NormalizedConstraintSet &aConstraints,
+                              const nsString& aDeviceId) const;
+  static void TrimLessFitCandidates(CapabilitySet& set);
+  static void LogConstraints(const NormalizedConstraintSet& aConstraints);
+  static void LogCapability(const char* aHeader,
+                            const webrtc::CaptureCapability &aCapability,
+                            uint32_t aDistance);
+  virtual size_t NumCapabilities() const;
+  virtual void GetCapability(size_t aIndex, webrtc::CaptureCapability& aOut) const;
+  virtual bool ChooseCapability(const NormalizedConstraints &aConstraints,
+                                const MediaEnginePrefs &aPrefs,
+                                const nsString& aDeviceId);
+  void SetName(nsString aName);
+  void SetUUID(const char* aUUID);
+  const nsCString& GetUUID() const; // protected access
+
+  // Engine variables.
+
+  // mMonitor protects mImage access/changes, and transitions of mState
+  // from kStarted to kStopped (which are combined with EndTrack() and
+  // image changes).
+  // mMonitor also protects mSources[] and mPrincipalHandles[] access/changes.
+  // mSources[] and mPrincipalHandles[] are accessed from webrtc threads.
+
+  // All the mMonitor accesses are from the child classes.
+  Monitor mMonitor; // Monitor for processing Camera frames.
+  nsTArray<RefPtr<SourceMediaStream>> mSources; // When this goes empty, we shut down HW
+  nsTArray<PrincipalHandle> mPrincipalHandles; // Directly mapped to mSources.
+  RefPtr<layers::Image> mImage;
+  RefPtr<layers::ImageContainer> mImageContainer;
+  int mWidth, mHeight; // protected with mMonitor on Gonk due to different threading
+  // end of data protected by mMonitor
+
+
+  bool mInitDone;
+  bool mHasDirectListeners;
+  int mCaptureIndex;
+  TrackID mTrackID;
+
+  webrtc::CaptureCapability mCapability;
+
+  mutable nsTArray<webrtc::CaptureCapability> mHardcodedCapabilities;
+private:
+  nsString mDeviceName;
+  nsCString mUniqueId;
+  nsString mFacingMode;
+};
+
+
+} // namespace mozilla
+
+#endif // MediaEngineCameraVideoSource_h
diff --git a/dom/media/webrtc/MediaEngineDefault.cpp b/dom/media/webrtc/MediaEngineDefault.cpp
new file mode 100644
index 000000000..9c97d197f
--- /dev/null
+++ b/dom/media/webrtc/MediaEngineDefault.cpp
@@ -0,0 +1,568 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "MediaEngineDefault.h"
+
+#include "nsCOMPtr.h"
+#include "mozilla/dom/File.h"
+#include "mozilla/UniquePtr.h"
+#include "nsILocalFile.h"
+#include "Layers.h"
+#include "ImageContainer.h"
+#include "ImageTypes.h"
+#include "prmem.h"
+#include "nsContentUtils.h"
+#include "MediaStreamGraph.h"
+
+#include "nsIFilePicker.h"
+#include "nsIPrefService.h"
+#include "nsIPrefBranch.h"
+
+#ifdef MOZ_WIDGET_ANDROID
+#include "nsISupportsUtils.h"
+#endif
+
+#ifdef MOZ_WEBRTC
+#include "YuvStamper.h"
+#endif
+
+#define AUDIO_RATE mozilla::MediaEngine::DEFAULT_SAMPLE_RATE
+#define DEFAULT_AUDIO_TIMER_MS 10
+namespace mozilla {
+
+using namespace mozilla::gfx;
+
+NS_IMPL_ISUPPORTS(MediaEngineDefaultVideoSource, nsITimerCallback)
+/**
+ * Default video source.
+ */
+
+// Fake video source: starts at a near-black YCbCr color (Cb=Cr=16) and
+// cycles colors from a timer. With MOZ_WEBRTC it reuses the camera base
+// class so constraint fitting works; otherwise it derives directly from
+// MediaEngineVideoSource.
+MediaEngineDefaultVideoSource::MediaEngineDefaultVideoSource()
+#ifdef MOZ_WEBRTC
+  : MediaEngineCameraVideoSource("FakeVideo.Monitor")
+#else
+  : MediaEngineVideoSource()
+#endif
+  , mTimer(nullptr)
+  , mMonitor("Fake video")
+  , mCb(16), mCr(16)
+{
+  // Asynchronous container so frames can be composited off-main-thread.
+  mImageContainer =
+    layers::LayerManager::CreateImageContainer(layers::ImageContainer::ASYNCHRONOUS);
+}
+
+MediaEngineDefaultVideoSource::~MediaEngineDefaultVideoSource()
+{}
+
+void
+MediaEngineDefaultVideoSource::GetName(nsAString& aName) const
+{
+  // Fixed, human-readable label for the fake camera device.
+  aName.AssignLiteral(u"Default Video Device");
+}
+
+void
+MediaEngineDefaultVideoSource::GetUUID(nsACString& aUUID) const
+{
+  // Stable, hard-coded id for the fake camera device.
+  aUUID.AssignLiteral("1041FCBD-3F12-4F7B-9E9B-1EC556DD5676");
+}
+
+uint32_t
+MediaEngineDefaultVideoSource::GetBestFitnessDistance(
+    const nsTArray<const NormalizedConstraintSet*>& aConstraintSets,
+    const nsString& aDeviceId) const
+{
+  uint32_t distance = 0;
+#ifdef MOZ_WEBRTC
+  // Only the first constraint set contributes to the distance.
+  if (!aConstraintSets.IsEmpty()) {
+    distance = GetMinimumFitnessDistance(*aConstraintSets[0], aDeviceId);
+  }
+#endif
+  return distance;
+}
+
+// Reserves the fake camera and picks its resolution from constraints and
+// prefs, clamped to [160x90, 4096x2160]. Fails if already allocated or if
+// the test-only "bad device" id is requested. No handle is produced
+// (*aOutHandle = nullptr) since this source supports a single consumer.
+nsresult
+MediaEngineDefaultVideoSource::Allocate(const dom::MediaTrackConstraints &aConstraints,
+                                        const MediaEnginePrefs &aPrefs,
+                                        const nsString& aDeviceId,
+                                        const nsACString& aOrigin,
+                                        AllocationHandle** aOutHandle,
+                                        const char** aOutBadConstraint)
+{
+  if (mState != kReleased) {
+    return NS_ERROR_FAILURE;
+  }
+
+  FlattenedConstraints c(aConstraints);
+
+  // Mock failure for automated tests.
+  if (c.mDeviceId.mIdeal.find(NS_LITERAL_STRING("bad device")) !=
+      c.mDeviceId.mIdeal.end()) {
+    return NS_ERROR_FAILURE;
+  }
+
+
+  // emulator debug is very, very slow; reduce load on it with smaller/slower fake video
+  mOpts = aPrefs;
+  mOpts.mWidth = c.mWidth.Get(aPrefs.mWidth ? aPrefs.mWidth :
+#ifdef DEBUG
+                              MediaEngine::DEFAULT_43_VIDEO_WIDTH/2
+#else
+                              MediaEngine::DEFAULT_43_VIDEO_WIDTH
+#endif
+                              );
+  mOpts.mHeight = c.mHeight.Get(aPrefs.mHeight ? aPrefs.mHeight :
+#ifdef DEBUG
+                                MediaEngine::DEFAULT_43_VIDEO_HEIGHT/2
+#else
+                                MediaEngine::DEFAULT_43_VIDEO_HEIGHT
+#endif
+                                );
+  // Clamp to sane bounds regardless of what constraints/prefs requested.
+  mOpts.mWidth = std::max(160, std::min(mOpts.mWidth, 4096));
+  mOpts.mHeight = std::max(90, std::min(mOpts.mHeight, 2160));
+  mState = kAllocated;
+  *aOutHandle = nullptr;
+  return NS_OK;
+}
+
+nsresult
+MediaEngineDefaultVideoSource::Deallocate(AllocationHandle* aHandle)
+{
+  MOZ_ASSERT(!aHandle);
+  // Only a stopped or merely-allocated source may be released.
+  if (mState == kStopped || mState == kAllocated) {
+    mState = kReleased;
+    mImage = nullptr;
+    return NS_OK;
+  }
+  return NS_ERROR_FAILURE;
+}
+
+// Fills aData with a freshly PR_Malloc'ed I420 frame of a single solid
+// color (Y plane = aY, quarter-size Cb/Cr planes = aCb/aCr). Dimensions
+// must be even so the chroma planes divide cleanly. Caller must release
+// the buffer via ReleaseFrame(). NOTE(review): the PR_Malloc result is
+// not null-checked — an OOM here crashes in the memsets.
+static void AllocateSolidColorFrame(layers::PlanarYCbCrData& aData,
+                                    int aWidth, int aHeight,
+                                    int aY, int aCb, int aCr)
+{
+  MOZ_ASSERT(!(aWidth&1));
+  MOZ_ASSERT(!(aHeight&1));
+  // Allocate a single frame with a solid color
+  int yLen = aWidth*aHeight;
+  int cbLen = yLen>>2;
+  int crLen = cbLen;
+  uint8_t* frame = (uint8_t*) PR_Malloc(yLen+cbLen+crLen);
+  memset(frame, aY, yLen);
+  memset(frame+yLen, aCb, cbLen);
+  memset(frame+yLen+cbLen, aCr, crLen);
+
+  // One contiguous buffer: [Y | Cb | Cr], with 2x2 chroma subsampling.
+  aData.mYChannel = frame;
+  aData.mYSize = IntSize(aWidth, aHeight);
+  aData.mYStride = aWidth;
+  aData.mCbCrStride = aWidth>>1;
+  aData.mCbChannel = frame + yLen;
+  aData.mCrChannel = aData.mCbChannel + cbLen;
+  aData.mCbCrSize = IntSize(aWidth>>1, aHeight>>1);
+  aData.mPicX = 0;
+  aData.mPicY = 0;
+  aData.mPicSize = IntSize(aWidth, aHeight);
+  aData.mStereoMode = StereoMode::MONO;
+}
+
+// Frees the single buffer allocated by AllocateSolidColorFrame (mYChannel
+// is the start of the combined Y/Cb/Cr allocation).
+static void ReleaseFrame(layers::PlanarYCbCrData& aData)
+{
+  PR_Free(aData.mYChannel);
+}
+
+// Adds a video track to aStream and starts the repeating frame timer at
+// the configured FPS. Requires a prior successful Allocate().
+nsresult
+MediaEngineDefaultVideoSource::Start(SourceMediaStream* aStream, TrackID aID,
+                                     const PrincipalHandle& aPrincipalHandle)
+{
+  if (mState != kAllocated) {
+    return NS_ERROR_FAILURE;
+  }
+
+  mTimer = do_CreateInstance(NS_TIMER_CONTRACTID);
+  if (!mTimer) {
+    return NS_ERROR_FAILURE;
+  }
+
+  aStream->AddTrack(aID, 0, new VideoSegment(), SourceMediaStream::ADDTRACK_QUEUED);
+
+  // Remember TrackID so we can end it later
+  mTrackID = aID;
+
+  // Start timer for subsequent frames
+#if (defined(MOZ_WIDGET_GONK) || defined(MOZ_WIDGET_ANDROID)) && defined(DEBUG)
+// emulator debug is very, very slow and has problems dealing with realtime audio inputs
+  mTimer->InitWithCallback(this, (1000 / mOpts.mFPS)*10, nsITimer::TYPE_REPEATING_SLACK);
+#else
+  mTimer->InitWithCallback(this, 1000 / mOpts.mFPS, nsITimer::TYPE_REPEATING_SLACK);
+#endif
+  mState = kStarted;
+
+  return NS_OK;
+}
+
+nsresult
+MediaEngineDefaultVideoSource::Stop(SourceMediaStream *aSource, TrackID aID)
+{
+  // Must be running, and the frame timer must still exist.
+  if (mState != kStarted || !mTimer) {
+    return NS_ERROR_FAILURE;
+  }
+
+  // Silence the frame generator before ending the track.
+  mTimer->Cancel();
+  mTimer = nullptr;
+
+  aSource->EndTrack(aID);
+
+  mState = kStopped;
+  mImage = nullptr;
+  return NS_OK;
+}
+
+// Applying new constraints to the fake source is deliberately a no-op:
+// resolution is fixed at Allocate() time.
+nsresult
+MediaEngineDefaultVideoSource::Restart(
+    AllocationHandle* aHandle,
+    const dom::MediaTrackConstraints& aConstraints,
+    const MediaEnginePrefs &aPrefs,
+    const nsString& aDeviceId,
+    const char** aOutBadConstraint)
+{
+  return NS_OK;
+}
+
+// Timer callback: advance the Cb/Cr color walk one step, render a solid
+// frame of that color (optionally stamped with a timestamp for latency
+// tests), and publish it as mImage under mMonitor for NotifyPull.
+NS_IMETHODIMP
+MediaEngineDefaultVideoSource::Notify(nsITimer* aTimer)
+{
+  // Update the target color
+  if (mCr <= 16) {
+    if (mCb < 240) {
+      mCb++;
+    } else {
+      mCr++;
+    }
+  } else if (mCb >= 240) {
+    if (mCr < 240) {
+      mCr++;
+    } else {
+      mCb--;
+    }
+  } else if (mCr >= 240) {
+    if (mCb > 16) {
+      mCb--;
+    } else {
+      mCr--;
+    }
+  } else {
+    mCr--;
+  }
+
+  // Allocate a single solid color image
+  RefPtr<layers::PlanarYCbCrImage> ycbcr_image = mImageContainer->CreatePlanarYCbCrImage();
+  layers::PlanarYCbCrData data;
+  AllocateSolidColorFrame(data, mOpts.mWidth, mOpts.mHeight, 0x80, mCb, mCr);
+
+#ifdef MOZ_WEBRTC
+  // Encode the current wall-clock time into the Y plane so tests can
+  // measure end-to-end frame latency.
+  uint64_t timestamp = PR_Now();
+  YuvStamper::Encode(mOpts.mWidth, mOpts.mHeight, mOpts.mWidth,
+		     data.mYChannel,
+		     reinterpret_cast<unsigned char*>(&timestamp), sizeof(timestamp),
+		     0, 0);
+#endif
+
+  bool setData = ycbcr_image->CopyData(data);
+  MOZ_ASSERT(setData);
+
+  // SetData copies data, so we can free the frame
+  ReleaseFrame(data);
+
+  if (!setData) {
+    return NS_ERROR_FAILURE;
+  }
+
+  MonitorAutoLock lock(mMonitor);
+
+  // implicitly releases last image
+  mImage = ycbcr_image.forget();
+
+  return NS_OK;
+}
+
+// MSG pull callback: append the most recent timer-generated frame (or a
+// null frame before the first Notify()) to cover the time up to
+// aDesiredTime. Holds mMonitor while reading mImage and mState.
+void
+MediaEngineDefaultVideoSource::NotifyPull(MediaStreamGraph* aGraph,
+                                          SourceMediaStream *aSource,
+                                          TrackID aID,
+                                          StreamTime aDesiredTime,
+                                          const PrincipalHandle& aPrincipalHandle)
+{
+  // AddTrack takes ownership of segment
+  VideoSegment segment;
+  MonitorAutoLock lock(mMonitor);
+  if (mState != kStarted) {
+    return;
+  }
+
+  // Note: we're not giving up mImage here
+  RefPtr<layers::Image> image = mImage;
+  StreamTime delta = aDesiredTime - aSource->GetEndOfAppendedData(aID);
+
+  if (delta > 0) {
+    // nullptr images are allowed
+    IntSize size(image ? mOpts.mWidth : 0, image ? mOpts.mHeight : 0);
+    segment.AppendFrame(image.forget(), delta, size, aPrincipalHandle);
+    // This can fail if either a) we haven't added the track yet, or b)
+    // we've removed or finished the track.
+    aSource->AppendToTrack(aID, &segment);
+  }
+}
+
+// generate 1k sine wave per second
+class SineWaveGenerator
+{
+public:
+ static const int bytesPerSample = 2;
+ static const int millisecondsPerSecond = PR_MSEC_PER_SEC;
+
+ explicit SineWaveGenerator(uint32_t aSampleRate, uint32_t aFrequency) :
+ mTotalLength(aSampleRate / aFrequency),
+ mReadLength(0) {
+ // If we allow arbitrary frequencies, there's no guarantee we won't get rounded here
+ // We could include an error term and adjust for it in generation; not worth the trouble
+ //MOZ_ASSERT(mTotalLength * aFrequency == aSampleRate);
+ mAudioBuffer = MakeUnique<int16_t[]>(mTotalLength);
+ for (int i = 0; i < mTotalLength; i++) {
+ // Set volume to -20db. It's from 32768.0 * 10^(-20/20) = 3276.8
+ mAudioBuffer[i] = (3276.8f * sin(2 * M_PI * i / mTotalLength));
+ }
+ }
+
+ // NOTE: only safely called from a single thread (MSG callback)
+ void generate(int16_t* aBuffer, int16_t aLengthInSamples) {
+ int16_t remaining = aLengthInSamples;
+
+ while (remaining) {
+ int16_t processSamples = 0;
+
+ if (mTotalLength - mReadLength >= remaining) {
+ processSamples = remaining;
+ } else {
+ processSamples = mTotalLength - mReadLength;
+ }
+ memcpy(aBuffer, &mAudioBuffer[mReadLength], processSamples * bytesPerSample);
+ aBuffer += processSamples;
+ mReadLength += processSamples;
+ remaining -= processSamples;
+ if (mReadLength == mTotalLength) {
+ mReadLength = 0;
+ }
+ }
+ }
+
+private:
+ UniquePtr<int16_t[]> mAudioBuffer;
+ int16_t mTotalLength;
+ int16_t mReadLength;
+};
+
+/**
+ * Default audio source.
+ */
+
+NS_IMPL_ISUPPORTS0(MediaEngineDefaultAudioSource)
+
+// Fake audio source: emits a continuous sine tone generated on demand.
+MediaEngineDefaultAudioSource::MediaEngineDefaultAudioSource()
+  : MediaEngineAudioSource(kReleased)
+  , mLastNotify(0)
+{}
+
+MediaEngineDefaultAudioSource::~MediaEngineDefaultAudioSource()
+{}
+
+void
+MediaEngineDefaultAudioSource::GetName(nsAString& aName) const
+{
+  // Fixed, human-readable label for the fake microphone device.
+  aName.AssignLiteral(u"Default Audio Device");
+}
+
+void
+MediaEngineDefaultAudioSource::GetUUID(nsACString& aUUID) const
+{
+  // Stable, hard-coded id for the fake microphone device.
+  aUUID.AssignLiteral("B7CBD7C1-53EF-42F9-8353-73F61C70C092");
+}
+
+uint32_t
+MediaEngineDefaultAudioSource::GetBestFitnessDistance(
+    const nsTArray<const NormalizedConstraintSet*>& aConstraintSets,
+    const nsString& aDeviceId) const
+{
+  uint32_t distance = 0;
+#ifdef MOZ_WEBRTC
+  // Only the first constraint set contributes to the distance.
+  if (!aConstraintSets.IsEmpty()) {
+    distance = GetMinimumFitnessDistance(*aConstraintSets[0], aDeviceId);
+  }
+#endif
+  return distance;
+}
+
+// Reserves the fake microphone and builds the sine generator at the
+// pref-selected frequency (1 kHz default). Fails if already allocated or
+// if the test-only "bad device" id is requested. No handle is produced
+// (*aOutHandle = nullptr) since this source supports a single consumer.
+nsresult
+MediaEngineDefaultAudioSource::Allocate(const dom::MediaTrackConstraints &aConstraints,
+                                        const MediaEnginePrefs &aPrefs,
+                                        const nsString& aDeviceId,
+                                        const nsACString& aOrigin,
+                                        AllocationHandle** aOutHandle,
+                                        const char** aOutBadConstraint)
+{
+  if (mState != kReleased) {
+    return NS_ERROR_FAILURE;
+  }
+
+  // Mock failure for automated tests.
+  if (aConstraints.mDeviceId.IsString() &&
+      aConstraints.mDeviceId.GetAsString().EqualsASCII("bad device")) {
+    return NS_ERROR_FAILURE;
+  }
+
+  mState = kAllocated;
+  // generate sine wave (default 1KHz)
+  mSineGenerator = new SineWaveGenerator(AUDIO_RATE,
+                                         static_cast<uint32_t>(aPrefs.mFreq ? aPrefs.mFreq : 1000));
+  *aOutHandle = nullptr;
+  return NS_OK;
+}
+
+nsresult
+MediaEngineDefaultAudioSource::Deallocate(AllocationHandle* aHandle)
+{
+  MOZ_ASSERT(!aHandle);
+  // Only a stopped or merely-allocated source may be released.
+  if (mState == kStopped || mState == kAllocated) {
+    mState = kReleased;
+    return NS_OK;
+  }
+  return NS_ERROR_FAILURE;
+}
+
+// Adds an audio track to aStream; actual samples are produced lazily in
+// NotifyPull. Requires a prior successful Allocate().
+nsresult
+MediaEngineDefaultAudioSource::Start(SourceMediaStream* aStream, TrackID aID,
+                                     const PrincipalHandle& aPrincipalHandle)
+{
+  if (mState != kAllocated) {
+    return NS_ERROR_FAILURE;
+  }
+
+  // AddTrack will take ownership of segment
+  AudioSegment* segment = new AudioSegment();
+  aStream->AddAudioTrack(aID, AUDIO_RATE, 0, segment, SourceMediaStream::ADDTRACK_QUEUED);
+
+  // Remember TrackID so we can finish later
+  mTrackID = aID;
+
+  // Reset the pull cursor so generation restarts from time zero.
+  mLastNotify = 0;
+  mState = kStarted;
+  return NS_OK;
+}
+
+// Ends the audio track; only valid while started.
+nsresult
+MediaEngineDefaultAudioSource::Stop(SourceMediaStream *aSource, TrackID aID)
+{
+  if (mState != kStarted) {
+    return NS_ERROR_FAILURE;
+  }
+  aSource->EndTrack(aID);
+
+  mState = kStopped;
+  return NS_OK;
+}
+
+// Applying new constraints to the fake source is deliberately a no-op:
+// the tone frequency is fixed at Allocate() time.
+nsresult
+MediaEngineDefaultAudioSource::Restart(AllocationHandle* aHandle,
+                                       const dom::MediaTrackConstraints& aConstraints,
+                                       const MediaEnginePrefs &aPrefs,
+                                       const nsString& aDeviceId,
+                                       const char** aOutBadConstraint)
+{
+  return NS_OK;
+}
+
+// Generates aSamples mono 16-bit sine samples into a fresh SharedBuffer
+// and appends them to aSegment tagged with aPrincipalHandle.
+void
+MediaEngineDefaultAudioSource::AppendToSegment(AudioSegment& aSegment,
+                                               TrackTicks aSamples,
+                                               const PrincipalHandle& aPrincipalHandle)
+{
+  RefPtr<SharedBuffer> buffer = SharedBuffer::Create(aSamples * sizeof(int16_t));
+  int16_t* dest = static_cast<int16_t*>(buffer->Data());
+
+  mSineGenerator->generate(dest, aSamples);
+  AutoTArray<const int16_t*,1> channels;
+  channels.AppendElement(dest);
+  aSegment.AppendFrames(buffer.forget(), channels, aSamples, aPrincipalHandle);
+}
+
+void
+MediaEngineDefaultAudioSource::NotifyPull(MediaStreamGraph* aGraph,
+                                          SourceMediaStream *aSource,
+                                          TrackID aID,
+                                          StreamTime aDesiredTime,
+                                          const PrincipalHandle& aPrincipalHandle)
+{
+  MOZ_ASSERT(aID == mTrackID);
+  // Convert the desired stream time to sample ticks, rounding up so the
+  // track never falls behind, then emit only the not-yet-delivered span.
+  // Tracking in ticks (rather than re-deriving each time) avoids
+  // accumulating rounding errors.
+  TrackTicks desired = aSource->TimeToTicksRoundUp(AUDIO_RATE, aDesiredTime);
+  TrackTicks delta = desired - mLastNotify;
+  mLastNotify = desired;
+
+  AudioSegment segment;
+  AppendToSegment(segment, delta, aPrincipalHandle);
+  aSource->AppendToTrack(mTrackID, &segment);
+}
+
+void
+MediaEngineDefault::EnumerateVideoDevices(dom::MediaSourceEnum aMediaSource,
+                                          nsTArray<RefPtr<MediaEngineVideoSource> >* aVSources) {
+  MutexAutoLock lock(mMutex);
+
+  // only supports camera sources (for now). See Bug 1038241
+  if (aMediaSource != dom::MediaSourceEnum::Camera) {
+    return;
+  }
+
+  // Always hand out a brand-new fake source. Sources can no longer be
+  // shared between consumers because the resolution is chosen per-consumer
+  // in Allocate().
+  RefPtr<MediaEngineVideoSource> newSource = new MediaEngineDefaultVideoSource();
+  mVSources.AppendElement(newSource);
+  aVSources->AppendElement(newSource);
+}
+
+void
+MediaEngineDefault::EnumerateAudioDevices(dom::MediaSourceEnum aMediaSource,
+                                          nsTArray<RefPtr<MediaEngineAudioSource> >* aASources) {
+  MutexAutoLock lock(mMutex);
+
+  // aMediaSource is ignored for audio devices (for now).
+  // Hand out every existing fake source that is not currently in use.
+  for (RefPtr<MediaEngineAudioSource>& source : mASources) {
+    if (source->IsAvailable()) {
+      aASources->AppendElement(source);
+    }
+  }
+
+  // All streams are currently busy; just make a new one.
+  if (aASources->IsEmpty()) {
+    RefPtr<MediaEngineAudioSource> newSource =
+      new MediaEngineDefaultAudioSource();
+    mASources.AppendElement(newSource);
+    aASources->AppendElement(newSource);
+  }
+}
+
+} // namespace mozilla
diff --git a/dom/media/webrtc/MediaEngineDefault.h b/dom/media/webrtc/MediaEngineDefault.h
new file mode 100644
index 000000000..c305e00bb
--- /dev/null
+++ b/dom/media/webrtc/MediaEngineDefault.h
@@ -0,0 +1,215 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef MEDIAENGINEDEFAULT_H_
+#define MEDIAENGINEDEFAULT_H_
+
+#include "nsITimer.h"
+
+#include "nsAutoPtr.h"
+#include "nsCOMPtr.h"
+#include "DOMMediaStream.h"
+#include "nsComponentManagerUtils.h"
+#include "mozilla/Monitor.h"
+
+#include "VideoUtils.h"
+#include "MediaEngine.h"
+#include "VideoSegment.h"
+#include "AudioSegment.h"
+#include "StreamTracks.h"
+#ifdef MOZ_WEBRTC
+#include "MediaEngineCameraVideoSource.h"
+#endif
+#include "MediaStreamGraph.h"
+#include "MediaTrackConstraints.h"
+
+namespace mozilla {
+
+namespace layers {
+class ImageContainer;
+} // namespace layers
+
+class MediaEngineDefault;
+
+/**
+ * The default implementation of the MediaEngine interface.
+ */
+/**
+ * The default implementation of the MediaEngine interface.
+ *
+ * Fake camera: a timer (nsITimerCallback) repaints a solid color that
+ * cycles over time; NotifyPull hands the latest frame to the graph.
+ */
+class MediaEngineDefaultVideoSource : public nsITimerCallback,
+#ifdef MOZ_WEBRTC
+                                      public MediaEngineCameraVideoSource
+#else
+                                      public MediaEngineVideoSource
+#endif
+{
+public:
+  MediaEngineDefaultVideoSource();
+
+  void GetName(nsAString&) const override;
+  void GetUUID(nsACString&) const override;
+
+  nsresult Allocate(const dom::MediaTrackConstraints &aConstraints,
+                    const MediaEnginePrefs &aPrefs,
+                    const nsString& aDeviceId,
+                    const nsACString& aOrigin,
+                    AllocationHandle** aOutHandle,
+                    const char** aOutBadConstraint) override;
+  nsresult Deallocate(AllocationHandle* aHandle) override;
+  nsresult Start(SourceMediaStream*, TrackID, const PrincipalHandle&) override;
+  nsresult Stop(SourceMediaStream*, TrackID) override;
+  nsresult Restart(AllocationHandle* aHandle,
+                   const dom::MediaTrackConstraints& aConstraints,
+                   const MediaEnginePrefs &aPrefs,
+                   const nsString& aDeviceId,
+                   const char** aOutBadConstraint) override;
+  void SetDirectListeners(bool aHasDirectListeners) override {};
+  void NotifyPull(MediaStreamGraph* aGraph,
+                  SourceMediaStream *aSource,
+                  TrackID aId,
+                  StreamTime aDesiredTime,
+                  const PrincipalHandle& aPrincipalHandle) override;
+  uint32_t GetBestFitnessDistance(
+      const nsTArray<const NormalizedConstraintSet*>& aConstraintSets,
+      const nsString& aDeviceId) const override;
+
+  bool IsFake() override {
+    return true;
+  }
+
+  dom::MediaSourceEnum GetMediaSource() const override {
+    return dom::MediaSourceEnum::Camera;
+  }
+
+  nsresult TakePhoto(MediaEnginePhotoCallback* aCallback) override
+  {
+    return NS_ERROR_NOT_IMPLEMENTED;
+  }
+
+  NS_DECL_THREADSAFE_ISUPPORTS
+  NS_DECL_NSITIMERCALLBACK
+
+protected:
+  ~MediaEngineDefaultVideoSource();
+
+  friend class MediaEngineDefault;
+
+  TrackID mTrackID;
+  nsCOMPtr<nsITimer> mTimer;       // drives frame generation (Notify)
+  // mMonitor protects mImage access/changes, and transitions of mState
+  // from kStarted to kStopped (which are combined with EndTrack() and
+  // image changes).
+  Monitor mMonitor;
+  RefPtr<layers::Image> mImage;    // latest generated frame
+
+  RefPtr<layers::ImageContainer> mImageContainer;
+
+  MediaEnginePrefs mOpts;          // resolution/FPS chosen in Allocate()
+  int mCb;                         // current chroma-blue of the color walk
+  int mCr;                         // current chroma-red of the color walk
+};
+
+class SineWaveGenerator;
+
+// Fake microphone: synthesizes a sine tone on demand from NotifyPull.
+class MediaEngineDefaultAudioSource : public MediaEngineAudioSource
+{
+public:
+  MediaEngineDefaultAudioSource();
+
+  void GetName(nsAString&) const override;
+  void GetUUID(nsACString&) const override;
+
+  nsresult Allocate(const dom::MediaTrackConstraints &aConstraints,
+                    const MediaEnginePrefs &aPrefs,
+                    const nsString& aDeviceId,
+                    const nsACString& aOrigin,
+                    AllocationHandle** aOutHandle,
+                    const char** aOutBadConstraint) override;
+  nsresult Deallocate(AllocationHandle* aHandle) override;
+  nsresult Start(SourceMediaStream*, TrackID, const PrincipalHandle&) override;
+  nsresult Stop(SourceMediaStream*, TrackID) override;
+  nsresult Restart(AllocationHandle* aHandle,
+                   const dom::MediaTrackConstraints& aConstraints,
+                   const MediaEnginePrefs &aPrefs,
+                   const nsString& aDeviceId,
+                   const char** aOutBadConstraint) override;
+  void SetDirectListeners(bool aHasDirectListeners) override {};
+  void inline AppendToSegment(AudioSegment& aSegment,
+                              TrackTicks aSamples,
+                              const PrincipalHandle& aPrincipalHandle);
+  void NotifyPull(MediaStreamGraph* aGraph,
+                  SourceMediaStream *aSource,
+                  TrackID aId,
+                  StreamTime aDesiredTime,
+                  const PrincipalHandle& aPrincipalHandle) override;
+
+  // A fake device has no real input/output hardware to observe.
+  void NotifyOutputData(MediaStreamGraph* aGraph,
+                        AudioDataValue* aBuffer, size_t aFrames,
+                        TrackRate aRate, uint32_t aChannels) override
+  {}
+  void NotifyInputData(MediaStreamGraph* aGraph,
+                       const AudioDataValue* aBuffer, size_t aFrames,
+                       TrackRate aRate, uint32_t aChannels) override
+  {}
+  void DeviceChanged() override
+  {}
+  bool IsFake() override {
+    return true;
+  }
+
+  dom::MediaSourceEnum GetMediaSource() const override {
+    return dom::MediaSourceEnum::Microphone;
+  }
+
+  nsresult TakePhoto(MediaEnginePhotoCallback* aCallback) override
+  {
+    return NS_ERROR_NOT_IMPLEMENTED;
+  }
+
+  uint32_t GetBestFitnessDistance(
+      const nsTArray<const NormalizedConstraintSet*>& aConstraintSets,
+      const nsString& aDeviceId) const override;
+
+  NS_DECL_THREADSAFE_ISUPPORTS
+
+protected:
+  ~MediaEngineDefaultAudioSource();
+
+  TrackID mTrackID;
+
+  TrackTicks mLastNotify; // Accessed in ::Start(), then on NotifyPull (from MSG thread)
+
+  // Created on Allocate, then accessed from NotifyPull (MSG thread)
+  nsAutoPtr<SineWaveGenerator> mSineGenerator;
+};
+
+
+// Engine that vends the fake video/audio sources above; used when no real
+// hardware backend is in play (e.g. automated tests).
+class MediaEngineDefault : public MediaEngine
+{
+  typedef MediaEngine Super;
+public:
+  explicit MediaEngineDefault() : mMutex("mozilla::MediaEngineDefault") {}
+
+  void EnumerateVideoDevices(dom::MediaSourceEnum,
+                             nsTArray<RefPtr<MediaEngineVideoSource> >*) override;
+  void EnumerateAudioDevices(dom::MediaSourceEnum,
+                             nsTArray<RefPtr<MediaEngineAudioSource> >*) override;
+  void Shutdown() override {
+    MutexAutoLock lock(mMutex);
+
+    mVSources.Clear();
+    mASources.Clear();
+  };
+
+private:
+  ~MediaEngineDefault() {}
+
+  Mutex mMutex;
+  // protected with mMutex:
+
+  nsTArray<RefPtr<MediaEngineVideoSource> > mVSources;
+  nsTArray<RefPtr<MediaEngineAudioSource> > mASources;
+};
+
+} // namespace mozilla
+
+#endif /* MEDIAENGINEDEFAULT_H_ */
diff --git a/dom/media/webrtc/MediaEngineRemoteVideoSource.cpp b/dom/media/webrtc/MediaEngineRemoteVideoSource.cpp
new file mode 100644
index 000000000..881d85b4a
--- /dev/null
+++ b/dom/media/webrtc/MediaEngineRemoteVideoSource.cpp
@@ -0,0 +1,509 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "MediaEngineRemoteVideoSource.h"
+
+#include "mozilla/RefPtr.h"
+#include "VideoUtils.h"
+#include "nsIPrefService.h"
+#include "MediaTrackConstraints.h"
+#include "CamerasChild.h"
+
+extern mozilla::LogModule* GetMediaManagerLog();
+#define LOG(msg) MOZ_LOG(GetMediaManagerLog(), mozilla::LogLevel::Debug, msg)
+#define LOGFRAME(msg) MOZ_LOG(GetMediaManagerLog(), mozilla::LogLevel::Verbose, msg)
+
+namespace mozilla {
+
+// These need a definition somewhere because template
+// code is allowed to take their address, and they aren't
+// guaranteed to have one without this.
+const unsigned int MediaEngineSource::kMaxDeviceNameLength;
+const unsigned int MediaEngineSource::kMaxUniqueIdLength;
+
+using dom::ConstrainLongRange;
+
+NS_IMPL_ISUPPORTS0(MediaEngineRemoteVideoSource)
+
+// Camera-backed source that proxies capture through the Cameras IPC child
+// (out-of-process capture engine). Settings start at 0 until a capability
+// is chosen; Init() fetches the device name/id from the engine.
+MediaEngineRemoteVideoSource::MediaEngineRemoteVideoSource(
+  int aIndex, mozilla::camera::CaptureEngine aCapEngine,
+  dom::MediaSourceEnum aMediaSource, bool aScary, const char* aMonitorName)
+  : MediaEngineCameraVideoSource(aIndex, aMonitorName),
+    mMediaSource(aMediaSource),
+    mCapEngine(aCapEngine),
+    mScary(aScary)
+{
+  MOZ_ASSERT(aMediaSource != dom::MediaSourceEnum::Other);
+  mSettings.mWidth.Construct(0);
+  mSettings.mHeight.Construct(0);
+  mSettings.mFrameRate.Construct(0);
+  Init();
+}
+
+// Queries the capture engine (via IPC) for this device's name and unique
+// id; on success records them and marks the source usable (mInitDone).
+void
+MediaEngineRemoteVideoSource::Init()
+{
+  LOG((__PRETTY_FUNCTION__));
+  char deviceName[kMaxDeviceNameLength];
+  char uniqueId[kMaxUniqueIdLength];
+  if (mozilla::camera::GetChildAndCall(
+        &mozilla::camera::CamerasChild::GetCaptureDevice,
+        mCapEngine, mCaptureIndex,
+        deviceName, kMaxDeviceNameLength,
+        uniqueId, kMaxUniqueIdLength, nullptr)) {
+    // Non-zero return: leave mInitDone false so later calls bail out.
+    LOG(("Error initializing RemoteVideoSource (GetCaptureDevice)"));
+    return;
+  }
+
+  SetName(NS_ConvertUTF8toUTF16(deviceName));
+  SetUUID(uniqueId);
+
+  mInitDone = true;
+
+  return;
+}
+
+// Tears the source down completely: stops every consuming stream, then
+// deallocates every outstanding handle, driving mState back to kReleased.
+void
+MediaEngineRemoteVideoSource::Shutdown()
+{
+  LOG((__PRETTY_FUNCTION__));
+  if (!mInitDone) {
+    return;
+  }
+  Super::Shutdown();
+  if (mState == kStarted) {
+    SourceMediaStream *source;
+    bool empty;
+
+    // Pop one source at a time; the lock is released before Stop() since
+    // Stop() takes mMonitor itself.
+    while (1) {
+      {
+        MonitorAutoLock lock(mMonitor);
+        empty = mSources.IsEmpty();
+        if (empty) {
+          MOZ_ASSERT(mPrincipalHandles.IsEmpty());
+          break;
+        }
+        source = mSources[0];
+      }
+      Stop(source, kVideoTrack); // XXX change to support multiple tracks
+    }
+    MOZ_ASSERT(mState == kStopped);
+  }
+
+  for (auto& registered : mRegisteredHandles) {
+    MOZ_ASSERT(mState == kAllocated || mState == kStopped);
+    Deallocate(registered.get());
+  }
+
+  MOZ_ASSERT(mState == kReleased);
+  mInitDone = false;
+  return;
+}
+
+// Allocates (or shares) the underlying capture device via the base-class
+// constraint machinery; the actual engine-side allocation happens in
+// UpdateSingleSource. Logs whether the device was reallocated or shared.
+nsresult
+MediaEngineRemoteVideoSource::Allocate(
+    const dom::MediaTrackConstraints& aConstraints,
+    const MediaEnginePrefs& aPrefs,
+    const nsString& aDeviceId,
+    const nsACString& aOrigin,
+    AllocationHandle** aOutHandle,
+    const char** aOutBadConstraint)
+{
+  LOG((__PRETTY_FUNCTION__));
+  AssertIsOnOwningThread();
+
+  if (!mInitDone) {
+    LOG(("Init not done"));
+    return NS_ERROR_FAILURE;
+  }
+
+  nsresult rv = Super::Allocate(aConstraints, aPrefs, aDeviceId, aOrigin,
+                                aOutHandle, aOutBadConstraint);
+  if (NS_FAILED(rv)) {
+    return rv;
+  }
+  // Debug-only bookkeeping: distinguish "reallocated" from "shared".
+  if (mState == kStarted &&
+      MOZ_LOG_TEST(GetMediaManagerLog(), mozilla::LogLevel::Debug)) {
+    MonitorAutoLock lock(mMonitor);
+    if (mSources.IsEmpty()) {
+      MOZ_ASSERT(mPrincipalHandles.IsEmpty());
+      LOG(("Video device %d reallocated", mCaptureIndex));
+    } else {
+      LOG(("Video device %d allocated shared", mCaptureIndex));
+    }
+  }
+  return NS_OK;
+}
+
+nsresult
+MediaEngineRemoteVideoSource::Deallocate(AllocationHandle* aHandle)
+{
+  LOG((__PRETTY_FUNCTION__));
+  AssertIsOnOwningThread();
+
+  Super::Deallocate(aHandle);
+
+  // Other handles still reference the device: keep it alive.
+  if (!mRegisteredHandles.IsEmpty()) {
+    LOG(("Video device %d deallocated but still in use", mCaptureIndex));
+    return NS_OK;
+  }
+
+  // Last handle gone: release the engine-side capture device.
+  if (mState != kStopped && mState != kAllocated) {
+    return NS_ERROR_FAILURE;
+  }
+  mozilla::camera::GetChildAndCall(
+    &mozilla::camera::CamerasChild::ReleaseCaptureDevice,
+    mCapEngine, mCaptureIndex);
+  mState = kReleased;
+  LOG(("Video device %d deallocated", mCaptureIndex));
+  return NS_OK;
+}
+
+// Registers aStream as a consumer and adds its video track. The first
+// starter also creates the image container and kicks off engine-side
+// capture; subsequent starters just share the already-running capture.
+nsresult
+MediaEngineRemoteVideoSource::Start(SourceMediaStream* aStream, TrackID aID,
+                                    const PrincipalHandle& aPrincipalHandle)
+{
+  LOG((__PRETTY_FUNCTION__));
+  AssertIsOnOwningThread();
+  if (!mInitDone || !aStream) {
+    LOG(("No stream or init not done"));
+    return NS_ERROR_FAILURE;
+  }
+
+  {
+    MonitorAutoLock lock(mMonitor);
+    mSources.AppendElement(aStream);
+    mPrincipalHandles.AppendElement(aPrincipalHandle);
+    MOZ_ASSERT(mSources.Length() == mPrincipalHandles.Length());
+  }
+
+  aStream->AddTrack(aID, 0, new VideoSegment(), SourceMediaStream::ADDTRACK_QUEUED);
+
+  if (mState == kStarted) {
+    return NS_OK;
+  }
+  mImageContainer =
+    layers::LayerManager::CreateImageContainer(layers::ImageContainer::ASYNCHRONOUS);
+
+  mState = kStarted;
+  mTrackID = aID;
+
+  if (mozilla::camera::GetChildAndCall(
+        &mozilla::camera::CamerasChild::StartCapture,
+        mCapEngine, mCaptureIndex, mCapability, this)) {
+    LOG(("StartCapture failed"));
+    return NS_ERROR_FAILURE;
+  }
+
+  return NS_OK;
+}
+
+// Unregisters aSource as a consumer and ends its track. Only when the
+// last consumer goes away is engine-side capture actually stopped (the
+// IPC call is made outside mMonitor to avoid lock-order issues).
+nsresult
+MediaEngineRemoteVideoSource::Stop(mozilla::SourceMediaStream* aSource,
+                                   mozilla::TrackID aID)
+{
+  LOG((__PRETTY_FUNCTION__));
+  AssertIsOnOwningThread();
+  {
+    MonitorAutoLock lock(mMonitor);
+
+    // Drop any cached image so we don't start with a stale image on next
+    // usage
+    mImage = nullptr;
+
+    size_t i = mSources.IndexOf(aSource);
+    if (i == mSources.NoIndex) {
+      // Already stopped - this is allowed
+      return NS_OK;
+    }
+
+    MOZ_ASSERT(mSources.Length() == mPrincipalHandles.Length());
+    mSources.RemoveElementAt(i);
+    mPrincipalHandles.RemoveElementAt(i);
+
+    aSource->EndTrack(aID);
+
+    if (!mSources.IsEmpty()) {
+      return NS_OK;
+    }
+    if (mState != kStarted) {
+      return NS_ERROR_FAILURE;
+    }
+
+    mState = kStopped;
+  }
+
+  mozilla::camera::GetChildAndCall(
+    &mozilla::camera::CamerasChild::StopCapture,
+    mCapEngine, mCaptureIndex);
+
+  return NS_OK;
+}
+
+nsresult
+MediaEngineRemoteVideoSource::Restart(AllocationHandle* aHandle,
+ const dom::MediaTrackConstraints& aConstraints,
+ const MediaEnginePrefs& aPrefs,
+ const nsString& aDeviceId,
+ const char** aOutBadConstraint)
+{
+ AssertIsOnOwningThread();
+ if (!mInitDone) {
+ LOG(("Init not done"));
+ return NS_ERROR_FAILURE;
+ }
+ MOZ_ASSERT(aHandle);
+ NormalizedConstraints constraints(aConstraints);
+ return ReevaluateAllocation(aHandle, &constraints, aPrefs, aDeviceId,
+ aOutBadConstraint);
+}
+
+nsresult
+MediaEngineRemoteVideoSource::UpdateSingleSource(
+ const AllocationHandle* aHandle,
+ const NormalizedConstraints& aNetConstraints,
+ const MediaEnginePrefs& aPrefs,
+ const nsString& aDeviceId,
+ const char** aOutBadConstraint)
+{
+ if (!ChooseCapability(aNetConstraints, aPrefs, aDeviceId)) {
+ *aOutBadConstraint = FindBadConstraint(aNetConstraints, *this, aDeviceId);
+ return NS_ERROR_FAILURE;
+ }
+
+ switch (mState) {
+ case kReleased:
+ MOZ_ASSERT(aHandle);
+ if (camera::GetChildAndCall(&camera::CamerasChild::AllocateCaptureDevice,
+ mCapEngine, GetUUID().get(),
+ kMaxUniqueIdLength, mCaptureIndex,
+ aHandle->mOrigin)) {
+ return NS_ERROR_FAILURE;
+ }
+ mState = kAllocated;
+ SetLastCapability(mCapability);
+ LOG(("Video device %d allocated for %s", mCaptureIndex,
+ aHandle->mOrigin.get()));
+ break;
+
+ case kStarted:
+ if (mCapability != mLastCapability) {
+ camera::GetChildAndCall(&camera::CamerasChild::StopCapture,
+ mCapEngine, mCaptureIndex);
+ if (camera::GetChildAndCall(&camera::CamerasChild::StartCapture,
+ mCapEngine, mCaptureIndex, mCapability,
+ this)) {
+ LOG(("StartCapture failed"));
+ return NS_ERROR_FAILURE;
+ }
+ SetLastCapability(mCapability);
+ }
+ break;
+
+ default:
+ LOG(("Video device %d %s in ignored state %d", mCaptureIndex,
+ (aHandle? aHandle->mOrigin.get() : ""), mState));
+ break;
+ }
+ return NS_OK;
+}
+
+void
+MediaEngineRemoteVideoSource::SetLastCapability(
+ const webrtc::CaptureCapability& aCapability)
+{
+ mLastCapability = mCapability;
+
+ webrtc::CaptureCapability cap = aCapability;
+ RefPtr<MediaEngineRemoteVideoSource> that = this;
+
+ NS_DispatchToMainThread(media::NewRunnableFrom([that, cap]() mutable {
+ that->mSettings.mWidth.Value() = cap.width;
+ that->mSettings.mHeight.Value() = cap.height;
+ that->mSettings.mFrameRate.Value() = cap.maxFPS;
+ return NS_OK;
+ }));
+}
+
+void
+MediaEngineRemoteVideoSource::NotifyPull(MediaStreamGraph* aGraph,
+ SourceMediaStream* aSource,
+ TrackID aID, StreamTime aDesiredTime,
+ const PrincipalHandle& aPrincipalHandle)
+{
+ VideoSegment segment;
+
+ MonitorAutoLock lock(mMonitor);
+ if (mState != kStarted) {
+ return;
+ }
+
+ StreamTime delta = aDesiredTime - aSource->GetEndOfAppendedData(aID);
+
+ if (delta > 0) {
+ // nullptr images are allowed
+ AppendToTrack(aSource, mImage, aID, delta, aPrincipalHandle);
+ }
+}
+
+int
+MediaEngineRemoteVideoSource::FrameSizeChange(unsigned int w, unsigned int h,
+ unsigned int streams)
+{
+ mWidth = w;
+ mHeight = h;
+ LOG(("MediaEngineRemoteVideoSource Video FrameSizeChange: %ux%u", w, h));
+ return 0;
+}
+
+int
+MediaEngineRemoteVideoSource::DeliverFrame(unsigned char* buffer,
+ size_t size,
+ uint32_t time_stamp,
+ int64_t ntp_time,
+ int64_t render_time,
+ void *handle)
+{
+ // Check for proper state.
+ if (mState != kStarted) {
+ LOG(("DeliverFrame: video not started"));
+ return 0;
+ }
+
+ if ((size_t) (mWidth*mHeight + 2*(((mWidth+1)/2)*((mHeight+1)/2))) != size) {
+ MOZ_ASSERT(false, "Wrong size frame in DeliverFrame!");
+ return 0;
+ }
+
+ // Create a video frame and append it to the track.
+ RefPtr<layers::PlanarYCbCrImage> image = mImageContainer->CreatePlanarYCbCrImage();
+
+ uint8_t* frame = static_cast<uint8_t*> (buffer);
+ const uint8_t lumaBpp = 8;
+ const uint8_t chromaBpp = 4;
+
+ // Take lots of care to round up!
+ layers::PlanarYCbCrData data;
+ data.mYChannel = frame;
+ data.mYSize = IntSize(mWidth, mHeight);
+ data.mYStride = (mWidth * lumaBpp + 7)/ 8;
+ data.mCbCrStride = (mWidth * chromaBpp + 7) / 8;
+ data.mCbChannel = frame + mHeight * data.mYStride;
+ data.mCrChannel = data.mCbChannel + ((mHeight+1)/2) * data.mCbCrStride;
+ data.mCbCrSize = IntSize((mWidth+1)/ 2, (mHeight+1)/ 2);
+ data.mPicX = 0;
+ data.mPicY = 0;
+ data.mPicSize = IntSize(mWidth, mHeight);
+ data.mStereoMode = StereoMode::MONO;
+
+ if (!image->CopyData(data)) {
+ MOZ_ASSERT(false);
+ return 0;
+ }
+
+#ifdef DEBUG
+ static uint32_t frame_num = 0;
+ LOGFRAME(("frame %d (%dx%d); timestamp %u, ntp_time %" PRIu64 ", render_time %" PRIu64,
+ frame_num++, mWidth, mHeight, time_stamp, ntp_time, render_time));
+#endif
+
+ // we don't touch anything in 'this' until here (except for snapshot,
+ // which has its own lock)
+ MonitorAutoLock lock(mMonitor);
+
+ // implicitly releases last image
+ mImage = image.forget();
+
+ // We'll push the frame into the MSG on the next NotifyPull. This will avoid
+ // swamping the MSG with frames should it be taking longer than normal to run
+ // an iteration.
+
+ return 0;
+}
+
+size_t
+MediaEngineRemoteVideoSource::NumCapabilities() const
+{
+ mHardcodedCapabilities.Clear();
+ int num = mozilla::camera::GetChildAndCall(
+ &mozilla::camera::CamerasChild::NumberOfCapabilities,
+ mCapEngine,
+ GetUUID().get());
+ if (num < 1) {
+ // The default for devices that don't return discrete capabilities: treat
+ // them as supporting all capabilities orthogonally. E.g. screensharing.
+ // CaptureCapability defaults key values to 0, which means accept any value.
+ mHardcodedCapabilities.AppendElement(webrtc::CaptureCapability());
+ num = mHardcodedCapabilities.Length(); // 1
+ }
+ return num;
+}
+
+bool
+MediaEngineRemoteVideoSource::ChooseCapability(
+ const NormalizedConstraints &aConstraints,
+ const MediaEnginePrefs &aPrefs,
+ const nsString& aDeviceId)
+{
+ AssertIsOnOwningThread();
+
+ switch(mMediaSource) {
+ case dom::MediaSourceEnum::Screen:
+ case dom::MediaSourceEnum::Window:
+ case dom::MediaSourceEnum::Application: {
+ FlattenedConstraints c(aConstraints);
+ // The actual resolution to constrain around is not easy to find ahead of
+ // time (and may in fact change over time), so as a hack, we push ideal
+ // and max constraints down to desktop_capture_impl.cc and finish the
+ // algorithm there.
+ mCapability.width = (c.mWidth.mIdeal.valueOr(0) & 0xffff) << 16 |
+ (c.mWidth.mMax & 0xffff);
+ mCapability.height = (c.mHeight.mIdeal.valueOr(0) & 0xffff) << 16 |
+ (c.mHeight.mMax & 0xffff);
+ mCapability.maxFPS = c.mFrameRate.Clamp(c.mFrameRate.mIdeal.valueOr(aPrefs.mFPS));
+ return true;
+ }
+ default:
+ return MediaEngineCameraVideoSource::ChooseCapability(aConstraints, aPrefs, aDeviceId);
+ }
+
+}
+
+void
+MediaEngineRemoteVideoSource::GetCapability(size_t aIndex,
+ webrtc::CaptureCapability& aOut) const
+{
+ if (!mHardcodedCapabilities.IsEmpty()) {
+ MediaEngineCameraVideoSource::GetCapability(aIndex, aOut);
+ }
+ mozilla::camera::GetChildAndCall(
+ &mozilla::camera::CamerasChild::GetCaptureCapability,
+ mCapEngine,
+ GetUUID().get(),
+ aIndex,
+ aOut);
+}
+
+void MediaEngineRemoteVideoSource::Refresh(int aIndex) {
+ // NOTE: mCaptureIndex might have changed when allocated!
+ // Use aIndex to update information, but don't change mCaptureIndex!!
+ // Caller looked up this source by uniqueId, so it shouldn't change
+ char deviceName[kMaxDeviceNameLength];
+ char uniqueId[kMaxUniqueIdLength];
+
+ if (mozilla::camera::GetChildAndCall(
+ &mozilla::camera::CamerasChild::GetCaptureDevice,
+ mCapEngine, aIndex,
+ deviceName, sizeof(deviceName),
+ uniqueId, sizeof(uniqueId), nullptr)) {
+ return;
+ }
+
+ SetName(NS_ConvertUTF8toUTF16(deviceName));
+#ifdef DEBUG
+ MOZ_ASSERT(GetUUID().Equals(uniqueId));
+#endif
+}
+
+}
diff --git a/dom/media/webrtc/MediaEngineRemoteVideoSource.h b/dom/media/webrtc/MediaEngineRemoteVideoSource.h
new file mode 100644
index 000000000..923e65654
--- /dev/null
+++ b/dom/media/webrtc/MediaEngineRemoteVideoSource.h
@@ -0,0 +1,136 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set sw=2 ts=8 et ft=cpp : */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef MEDIAENGINE_REMOTE_VIDEO_SOURCE_H_
+#define MEDIAENGINE_REMOTE_VIDEO_SOURCE_H_
+
+#include "prcvar.h"
+#include "prthread.h"
+#include "nsIThread.h"
+#include "nsIRunnable.h"
+
+#include "mozilla/Mutex.h"
+#include "mozilla/Monitor.h"
+#include "nsCOMPtr.h"
+#include "nsThreadUtils.h"
+#include "DOMMediaStream.h"
+#include "nsDirectoryServiceDefs.h"
+#include "nsComponentManagerUtils.h"
+
+#include "VideoUtils.h"
+#include "MediaEngineCameraVideoSource.h"
+#include "VideoSegment.h"
+#include "AudioSegment.h"
+#include "StreamTracks.h"
+#include "MediaStreamGraph.h"
+
+#include "MediaEngineWrapper.h"
+#include "mozilla/dom/MediaStreamTrackBinding.h"
+
+// WebRTC library includes follow
+#include "webrtc/common.h"
+#include "webrtc/video_engine/include/vie_capture.h"
+#include "webrtc/video_engine/include/vie_render.h"
+#include "CamerasChild.h"
+
+#include "NullTransport.h"
+
+namespace webrtc {
+class I420VideoFrame;
+}
+
+namespace mozilla {
+
+/**
+ * The WebRTC implementation of the MediaEngine interface.
+ */
+class MediaEngineRemoteVideoSource : public MediaEngineCameraVideoSource,
+ public webrtc::ExternalRenderer
+{
+ typedef MediaEngineCameraVideoSource Super;
+public:
+ NS_DECL_THREADSAFE_ISUPPORTS
+
+ // ExternalRenderer
+ int FrameSizeChange(unsigned int w, unsigned int h,
+ unsigned int streams) override;
+ int DeliverFrame(unsigned char* buffer,
+ size_t size,
+ uint32_t time_stamp,
+ int64_t ntp_time,
+ int64_t render_time,
+ void *handle) override;
+ // XXX!!!! FIX THIS
+ int DeliverI420Frame(const webrtc::I420VideoFrame& webrtc_frame) override { return 0; };
+ bool IsTextureSupported() override { return false; };
+
+ // MediaEngineCameraVideoSource
+ MediaEngineRemoteVideoSource(int aIndex, mozilla::camera::CaptureEngine aCapEngine,
+ dom::MediaSourceEnum aMediaSource,
+ bool aScary = false,
+ const char* aMonitorName = "RemoteVideo.Monitor");
+
+ nsresult Allocate(const dom::MediaTrackConstraints& aConstraints,
+ const MediaEnginePrefs& aPrefs,
+ const nsString& aDeviceId,
+ const nsACString& aOrigin,
+ AllocationHandle** aOutHandle,
+ const char** aOutBadConstraint) override;
+ nsresult Deallocate(AllocationHandle* aHandle) override;
+ nsresult Start(SourceMediaStream*, TrackID, const PrincipalHandle&) override;
+ nsresult Stop(SourceMediaStream*, TrackID) override;
+ nsresult Restart(AllocationHandle* aHandle,
+ const dom::MediaTrackConstraints& aConstraints,
+ const MediaEnginePrefs &aPrefs,
+ const nsString& aDeviceId,
+ const char** aOutBadConstraint) override;
+ void NotifyPull(MediaStreamGraph* aGraph,
+ SourceMediaStream* aSource,
+ TrackID aId,
+ StreamTime aDesiredTime,
+ const PrincipalHandle& aPrincipalHandle) override;
+ dom::MediaSourceEnum GetMediaSource() const override {
+ return mMediaSource;
+ }
+
+ bool ChooseCapability(const NormalizedConstraints &aConstraints,
+ const MediaEnginePrefs &aPrefs,
+ const nsString& aDeviceId) override;
+
+ void Refresh(int aIndex);
+
+ void Shutdown() override;
+
+ bool GetScary() const override { return mScary; }
+
+protected:
+ ~MediaEngineRemoteVideoSource() { }
+
+private:
+ // Initialize the needed Video engine interfaces.
+ void Init();
+ size_t NumCapabilities() const override;
+ void GetCapability(size_t aIndex, webrtc::CaptureCapability& aOut) const override;
+ void SetLastCapability(const webrtc::CaptureCapability& aCapability);
+
+ nsresult
+ UpdateSingleSource(const AllocationHandle* aHandle,
+ const NormalizedConstraints& aNetConstraints,
+ const MediaEnginePrefs& aPrefs,
+ const nsString& aDeviceId,
+ const char** aOutBadConstraint) override;
+
+ dom::MediaSourceEnum mMediaSource; // source of media (camera | application | screen)
+ mozilla::camera::CaptureEngine mCapEngine;
+
+ // To only restart camera when needed, we keep track of previous settings.
+ webrtc::CaptureCapability mLastCapability;
+ bool mScary;
+};
+
+}
+
+#endif /* MEDIAENGINE_REMOTE_VIDEO_SOURCE_H_ */
diff --git a/dom/media/webrtc/MediaEngineTabVideoSource.cpp b/dom/media/webrtc/MediaEngineTabVideoSource.cpp
new file mode 100644
index 000000000..d101bab1e
--- /dev/null
+++ b/dom/media/webrtc/MediaEngineTabVideoSource.cpp
@@ -0,0 +1,395 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "MediaEngineTabVideoSource.h"
+
+#include "mozilla/gfx/2D.h"
+#include "mozilla/RefPtr.h"
+#include "mozilla/UniquePtrExtensions.h"
+#include "mozilla/dom/BindingDeclarations.h"
+#include "nsGlobalWindow.h"
+#include "nsIDOMClientRect.h"
+#include "nsIDocShell.h"
+#include "nsIPresShell.h"
+#include "nsPresContext.h"
+#include "gfxContext.h"
+#include "gfx2DGlue.h"
+#include "ImageContainer.h"
+#include "Layers.h"
+#include "nsIInterfaceRequestorUtils.h"
+#include "nsIDOMDocument.h"
+#include "nsITabSource.h"
+#include "VideoUtils.h"
+#include "nsServiceManagerUtils.h"
+#include "nsIPrefService.h"
+#include "MediaTrackConstraints.h"
+
+namespace mozilla {
+
+using namespace mozilla::gfx;
+
+NS_IMPL_ISUPPORTS(MediaEngineTabVideoSource, nsIDOMEventListener, nsITimerCallback)
+
+MediaEngineTabVideoSource::MediaEngineTabVideoSource()
+ : mBufWidthMax(0)
+ , mBufHeightMax(0)
+ , mWindowId(0)
+ , mScrollWithPage(false)
+ , mViewportOffsetX(0)
+ , mViewportOffsetY(0)
+ , mViewportWidth(0)
+ , mViewportHeight(0)
+ , mTimePerFrame(0)
+ , mDataSize(0)
+ , mBlackedoutWindow(false)
+ , mMonitor("MediaEngineTabVideoSource") {}
+
+nsresult
+MediaEngineTabVideoSource::StartRunnable::Run()
+{
+ mVideoSource->Draw();
+ mVideoSource->mTimer = do_CreateInstance(NS_TIMER_CONTRACTID);
+ mVideoSource->mTimer->InitWithCallback(mVideoSource, mVideoSource->mTimePerFrame, nsITimer:: TYPE_REPEATING_SLACK);
+ if (mVideoSource->mTabSource) {
+ mVideoSource->mTabSource->NotifyStreamStart(mVideoSource->mWindow);
+ }
+ return NS_OK;
+}
+
+nsresult
+MediaEngineTabVideoSource::StopRunnable::Run()
+{
+ if (mVideoSource->mTimer) {
+ mVideoSource->mTimer->Cancel();
+ mVideoSource->mTimer = nullptr;
+ }
+ if (mVideoSource->mTabSource) {
+ mVideoSource->mTabSource->NotifyStreamStop(mVideoSource->mWindow);
+ }
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+MediaEngineTabVideoSource::HandleEvent(nsIDOMEvent *event) {
+ Draw();
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+MediaEngineTabVideoSource::Notify(nsITimer*) {
+ Draw();
+ return NS_OK;
+}
+
+nsresult
+MediaEngineTabVideoSource::InitRunnable::Run()
+{
+ if (mVideoSource->mWindowId != -1) {
+ nsGlobalWindow* globalWindow =
+ nsGlobalWindow::GetOuterWindowWithId(mVideoSource->mWindowId);
+ if (!globalWindow) {
+ // We can't access the window, just send a blacked out screen.
+ mVideoSource->mWindow = nullptr;
+ mVideoSource->mBlackedoutWindow = true;
+ } else {
+ nsCOMPtr<nsPIDOMWindowOuter> window = globalWindow->AsOuter();
+ if (window) {
+ mVideoSource->mWindow = window;
+ mVideoSource->mBlackedoutWindow = false;
+ }
+ }
+ }
+ if (!mVideoSource->mWindow && !mVideoSource->mBlackedoutWindow) {
+ nsresult rv;
+ mVideoSource->mTabSource = do_GetService(NS_TABSOURCESERVICE_CONTRACTID, &rv);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ nsCOMPtr<mozIDOMWindowProxy> win;
+ rv = mVideoSource->mTabSource->GetTabToStream(getter_AddRefs(win));
+ NS_ENSURE_SUCCESS(rv, rv);
+ if (!win)
+ return NS_OK;
+
+ mVideoSource->mWindow = nsPIDOMWindowOuter::From(win);
+ MOZ_ASSERT(mVideoSource->mWindow);
+ }
+ nsCOMPtr<nsIRunnable> start(new StartRunnable(mVideoSource));
+ start->Run();
+ return NS_OK;
+}
+
+nsresult
+MediaEngineTabVideoSource::DestroyRunnable::Run()
+{
+ MOZ_ASSERT(NS_IsMainThread());
+
+ mVideoSource->mWindow = nullptr;
+ mVideoSource->mTabSource = nullptr;
+
+ return NS_OK;
+}
+
+void
+MediaEngineTabVideoSource::GetName(nsAString_internal& aName) const
+{
+ aName.AssignLiteral(u"&getUserMedia.videoSource.tabShare;");
+}
+
+void
+MediaEngineTabVideoSource::GetUUID(nsACString_internal& aUuid) const
+{
+ aUuid.AssignLiteral("tab");
+}
+
+#define DEFAULT_TABSHARE_VIDEO_MAX_WIDTH 4096
+#define DEFAULT_TABSHARE_VIDEO_MAX_HEIGHT 4096
+#define DEFAULT_TABSHARE_VIDEO_FRAMERATE 30
+
+nsresult
+MediaEngineTabVideoSource::Allocate(const dom::MediaTrackConstraints& aConstraints,
+ const MediaEnginePrefs& aPrefs,
+ const nsString& aDeviceId,
+ const nsACString& aOrigin,
+ AllocationHandle** aOutHandle,
+ const char** aOutBadConstraint)
+{
+ // windowId is not a proper constraint, so just read it.
+ // It has no well-defined behavior in advanced, so ignore it there.
+
+ mWindowId = aConstraints.mBrowserWindow.WasPassed() ?
+ aConstraints.mBrowserWindow.Value() : -1;
+ *aOutHandle = nullptr;
+
+ {
+ MonitorAutoLock mon(mMonitor);
+ mState = kAllocated;
+ }
+
+ return Restart(nullptr, aConstraints, aPrefs, aDeviceId, aOutBadConstraint);
+}
+
+nsresult
+MediaEngineTabVideoSource::Restart(AllocationHandle* aHandle,
+ const dom::MediaTrackConstraints& aConstraints,
+ const mozilla::MediaEnginePrefs& aPrefs,
+ const nsString& aDeviceId,
+ const char** aOutBadConstraint)
+{
+ MOZ_ASSERT(!aHandle);
+
+ // scrollWithPage is not a proper constraint, so just read it.
+ // It has no well-defined behavior in advanced, so ignore it there.
+
+ mScrollWithPage = aConstraints.mScrollWithPage.WasPassed() ?
+ aConstraints.mScrollWithPage.Value() : false;
+
+ FlattenedConstraints c(aConstraints);
+
+ mBufWidthMax = c.mWidth.Get(DEFAULT_TABSHARE_VIDEO_MAX_WIDTH);
+ mBufHeightMax = c.mHeight.Get(DEFAULT_TABSHARE_VIDEO_MAX_HEIGHT);
+ double frameRate = c.mFrameRate.Get(DEFAULT_TABSHARE_VIDEO_FRAMERATE);
+ mTimePerFrame = std::max(10, int(1000.0 / (frameRate > 0? frameRate : 1)));
+
+ if (!mScrollWithPage) {
+ mViewportOffsetX = c.mViewportOffsetX.Get(0);
+ mViewportOffsetY = c.mViewportOffsetY.Get(0);
+ mViewportWidth = c.mViewportWidth.Get(INT32_MAX);
+ mViewportHeight = c.mViewportHeight.Get(INT32_MAX);
+ }
+ return NS_OK;
+}
+
+nsresult
+MediaEngineTabVideoSource::Deallocate(AllocationHandle* aHandle)
+{
+ MOZ_ASSERT(!aHandle);
+ NS_DispatchToMainThread(do_AddRef(new DestroyRunnable(this)));
+
+ {
+ MonitorAutoLock mon(mMonitor);
+ mState = kReleased;
+ }
+ return NS_OK;
+}
+
+nsresult
+MediaEngineTabVideoSource::Start(SourceMediaStream* aStream, TrackID aID,
+ const PrincipalHandle& aPrincipalHandle)
+{
+ nsCOMPtr<nsIRunnable> runnable;
+ if (!mWindow)
+ runnable = new InitRunnable(this);
+ else
+ runnable = new StartRunnable(this);
+ NS_DispatchToMainThread(runnable);
+ aStream->AddTrack(aID, 0, new VideoSegment());
+
+ {
+ MonitorAutoLock mon(mMonitor);
+ mState = kStarted;
+ }
+
+ return NS_OK;
+}
+
+void
+MediaEngineTabVideoSource::NotifyPull(MediaStreamGraph*,
+ SourceMediaStream* aSource,
+ TrackID aID, StreamTime aDesiredTime,
+ const PrincipalHandle& aPrincipalHandle)
+{
+ VideoSegment segment;
+ MonitorAutoLock mon(mMonitor);
+ if (mState != kStarted) {
+ return;
+ }
+
+ // Note: we're not giving up mImage here
+ RefPtr<layers::SourceSurfaceImage> image = mImage;
+ StreamTime delta = aDesiredTime - aSource->GetEndOfAppendedData(aID);
+ if (delta > 0) {
+ // nullptr images are allowed
+ gfx::IntSize size = image ? image->GetSize() : IntSize(0, 0);
+ segment.AppendFrame(image.forget().downcast<layers::Image>(), delta, size,
+ aPrincipalHandle);
+ // This can fail if either a) we haven't added the track yet, or b)
+ // we've removed or finished the track.
+ aSource->AppendToTrack(aID, &(segment));
+ }
+}
+
+void
+MediaEngineTabVideoSource::Draw() {
+ if (!mWindow && !mBlackedoutWindow) {
+ return;
+ }
+
+ if (mWindow) {
+ if (mScrollWithPage || mViewportWidth == INT32_MAX) {
+ mWindow->GetInnerWidth(&mViewportWidth);
+ }
+ if (mScrollWithPage || mViewportHeight == INT32_MAX) {
+ mWindow->GetInnerHeight(&mViewportHeight);
+ }
+ if (!mViewportWidth || !mViewportHeight) {
+ return;
+ }
+ } else {
+ mViewportWidth = 640;
+ mViewportHeight = 480;
+ }
+
+ IntSize size;
+ {
+ float pixelRatio;
+ if (mWindow) {
+ pixelRatio = mWindow->GetDevicePixelRatio(CallerType::System);
+ } else {
+ pixelRatio = 1.0f;
+ }
+ const int32_t deviceWidth = (int32_t)(pixelRatio * mViewportWidth);
+ const int32_t deviceHeight = (int32_t)(pixelRatio * mViewportHeight);
+
+ if ((deviceWidth <= mBufWidthMax) && (deviceHeight <= mBufHeightMax)) {
+ size = IntSize(deviceWidth, deviceHeight);
+ } else {
+ const float scaleWidth = (float)mBufWidthMax / (float)deviceWidth;
+ const float scaleHeight = (float)mBufHeightMax / (float)deviceHeight;
+ const float scale = scaleWidth < scaleHeight ? scaleWidth : scaleHeight;
+
+ size = IntSize((int)(scale * deviceWidth), (int)(scale * deviceHeight));
+ }
+ }
+
+ gfxImageFormat format = SurfaceFormat::X8R8G8B8_UINT32;
+ uint32_t stride = gfxASurface::FormatStrideForWidth(format, size.width);
+
+ if (mDataSize < static_cast<size_t>(stride * size.height)) {
+ mDataSize = stride * size.height;
+ mData = MakeUniqueFallible<unsigned char[]>(mDataSize);
+ }
+ if (!mData) {
+ return;
+ }
+
+ nsCOMPtr<nsIPresShell> presShell;
+ if (mWindow) {
+ RefPtr<nsPresContext> presContext;
+ nsIDocShell* docshell = mWindow->GetDocShell();
+ if (docshell) {
+ docshell->GetPresContext(getter_AddRefs(presContext));
+ }
+ if (!presContext) {
+ return;
+ }
+ presShell = presContext->PresShell();
+ }
+
+ RefPtr<layers::ImageContainer> container =
+ layers::LayerManager::CreateImageContainer(layers::ImageContainer::ASYNCHRONOUS);
+ RefPtr<DrawTarget> dt =
+ Factory::CreateDrawTargetForData(BackendType::CAIRO,
+ mData.get(),
+ size,
+ stride,
+ SurfaceFormat::B8G8R8X8);
+ if (!dt || !dt->IsValid()) {
+ return;
+ }
+ RefPtr<gfxContext> context = gfxContext::CreateOrNull(dt);
+ MOZ_ASSERT(context); // already checked the draw target above
+ context->SetMatrix(context->CurrentMatrix().Scale((((float) size.width)/mViewportWidth),
+ (((float) size.height)/mViewportHeight)));
+
+ if (mWindow) {
+ nscolor bgColor = NS_RGB(255, 255, 255);
+ uint32_t renderDocFlags = mScrollWithPage? 0 :
+ (nsIPresShell::RENDER_IGNORE_VIEWPORT_SCROLLING |
+ nsIPresShell::RENDER_DOCUMENT_RELATIVE);
+ nsRect r(nsPresContext::CSSPixelsToAppUnits((float)mViewportOffsetX),
+ nsPresContext::CSSPixelsToAppUnits((float)mViewportOffsetY),
+ nsPresContext::CSSPixelsToAppUnits((float)mViewportWidth),
+ nsPresContext::CSSPixelsToAppUnits((float)mViewportHeight));
+ NS_ENSURE_SUCCESS_VOID(presShell->RenderDocument(r, renderDocFlags, bgColor, context));
+ }
+
+ RefPtr<SourceSurface> surface = dt->Snapshot();
+ if (!surface) {
+ return;
+ }
+
+ RefPtr<layers::SourceSurfaceImage> image = new layers::SourceSurfaceImage(size, surface);
+
+ MonitorAutoLock mon(mMonitor);
+ mImage = image;
+}
+
+nsresult
+MediaEngineTabVideoSource::Stop(mozilla::SourceMediaStream* aSource,
+ mozilla::TrackID aID)
+{
+ // If mBlackedoutWindow is true, we may be running
+ // despite mWindow == nullptr.
+ if (!mWindow && !mBlackedoutWindow) {
+ return NS_OK;
+ }
+
+ NS_DispatchToMainThread(new StopRunnable(this));
+
+ {
+ MonitorAutoLock mon(mMonitor);
+ mState = kStopped;
+ aSource->EndTrack(aID);
+ }
+ return NS_OK;
+}
+
+bool
+MediaEngineTabVideoSource::IsFake()
+{
+ return false;
+}
+
+}
diff --git a/dom/media/webrtc/MediaEngineTabVideoSource.h b/dom/media/webrtc/MediaEngineTabVideoSource.h
new file mode 100644
index 000000000..d11bd7765
--- /dev/null
+++ b/dom/media/webrtc/MediaEngineTabVideoSource.h
@@ -0,0 +1,114 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsIDOMEventListener.h"
+#include "MediaEngine.h"
+#include "ImageContainer.h"
+#include "nsITimer.h"
+#include "mozilla/Monitor.h"
+#include "mozilla/UniquePtr.h"
+#include "nsITabSource.h"
+
+namespace mozilla {
+
+class MediaEngineTabVideoSource : public MediaEngineVideoSource, nsIDOMEventListener, nsITimerCallback {
+ public:
+ NS_DECL_THREADSAFE_ISUPPORTS
+ NS_DECL_NSIDOMEVENTLISTENER
+ NS_DECL_NSITIMERCALLBACK
+ MediaEngineTabVideoSource();
+
+ void GetName(nsAString_internal&) const override;
+ void GetUUID(nsACString_internal&) const override;
+
+ bool GetScary() const override {
+ return true;
+ }
+
+ nsresult Allocate(const dom::MediaTrackConstraints &,
+ const mozilla::MediaEnginePrefs&,
+ const nsString& aDeviceId,
+ const nsACString& aOrigin,
+ AllocationHandle** aOutHandle,
+ const char** aOutBadConstraint) override;
+ nsresult Deallocate(AllocationHandle* aHandle) override;
+ nsresult Start(mozilla::SourceMediaStream*, mozilla::TrackID, const mozilla::PrincipalHandle&) override;
+ void SetDirectListeners(bool aHasDirectListeners) override {};
+ void NotifyPull(mozilla::MediaStreamGraph*, mozilla::SourceMediaStream*, mozilla::TrackID, mozilla::StreamTime, const mozilla::PrincipalHandle& aPrincipalHandle) override;
+ nsresult Stop(mozilla::SourceMediaStream*, mozilla::TrackID) override;
+ nsresult Restart(AllocationHandle* aHandle,
+ const dom::MediaTrackConstraints& aConstraints,
+ const mozilla::MediaEnginePrefs& aPrefs,
+ const nsString& aDeviceId,
+ const char** aOutBadConstraint) override;
+ bool IsFake() override;
+ dom::MediaSourceEnum GetMediaSource() const override {
+ return dom::MediaSourceEnum::Browser;
+ }
+ uint32_t GetBestFitnessDistance(
+ const nsTArray<const NormalizedConstraintSet*>& aConstraintSets,
+ const nsString& aDeviceId) const override
+ {
+ return 0;
+ }
+
+ nsresult TakePhoto(MediaEnginePhotoCallback* aCallback) override
+ {
+ return NS_ERROR_NOT_IMPLEMENTED;
+ }
+
+ void Draw();
+
+ class StartRunnable : public Runnable {
+ public:
+ explicit StartRunnable(MediaEngineTabVideoSource *videoSource) : mVideoSource(videoSource) {}
+ NS_IMETHOD Run();
+ RefPtr<MediaEngineTabVideoSource> mVideoSource;
+ };
+
+ class StopRunnable : public Runnable {
+ public:
+ explicit StopRunnable(MediaEngineTabVideoSource *videoSource) : mVideoSource(videoSource) {}
+ NS_IMETHOD Run();
+ RefPtr<MediaEngineTabVideoSource> mVideoSource;
+ };
+
+ class InitRunnable : public Runnable {
+ public:
+ explicit InitRunnable(MediaEngineTabVideoSource *videoSource) : mVideoSource(videoSource) {}
+ NS_IMETHOD Run();
+ RefPtr<MediaEngineTabVideoSource> mVideoSource;
+ };
+
+ class DestroyRunnable : public Runnable {
+ public:
+ explicit DestroyRunnable(MediaEngineTabVideoSource* videoSource) : mVideoSource(videoSource) {}
+ NS_IMETHOD Run();
+ RefPtr<MediaEngineTabVideoSource> mVideoSource;
+ };
+
+protected:
+ ~MediaEngineTabVideoSource() {}
+
+private:
+ int32_t mBufWidthMax;
+ int32_t mBufHeightMax;
+ int64_t mWindowId;
+ bool mScrollWithPage;
+ int32_t mViewportOffsetX;
+ int32_t mViewportOffsetY;
+ int32_t mViewportWidth;
+ int32_t mViewportHeight;
+ int32_t mTimePerFrame;
+ UniquePtr<unsigned char[]> mData;
+ size_t mDataSize;
+ nsCOMPtr<nsPIDOMWindowOuter> mWindow;
+ // If this is set, we will run despite mWindow == nullptr.
+ bool mBlackedoutWindow;
+ RefPtr<layers::SourceSurfaceImage> mImage;
+ nsCOMPtr<nsITimer> mTimer;
+ Monitor mMonitor;
+ nsCOMPtr<nsITabSource> mTabSource;
+ };
+}
diff --git a/dom/media/webrtc/MediaEngineWebRTC.cpp b/dom/media/webrtc/MediaEngineWebRTC.cpp
new file mode 100644
index 000000000..522f23f61
--- /dev/null
+++ b/dom/media/webrtc/MediaEngineWebRTC.cpp
@@ -0,0 +1,431 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set sw=2 ts=8 et ft=cpp : */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsIPrefService.h"
+#include "nsIPrefBranch.h"
+
+#include "CSFLog.h"
+#include "prenv.h"
+
+#include "mozilla/Logging.h"
+#ifdef XP_WIN
+#include "mozilla/WindowsVersion.h"
+#endif
+
+static mozilla::LazyLogModule sGetUserMediaLog("GetUserMedia");
+
+#include "MediaEngineWebRTC.h"
+#include "ImageContainer.h"
+#include "nsIComponentRegistrar.h"
+#include "MediaEngineTabVideoSource.h"
+#include "MediaEngineRemoteVideoSource.h"
+#include "CamerasChild.h"
+#include "nsITabSource.h"
+#include "MediaTrackConstraints.h"
+
+#ifdef MOZ_WIDGET_ANDROID
+#include "AndroidJNIWrapper.h"
+#include "AndroidBridge.h"
+#endif
+
+#undef LOG
+#define LOG(args) MOZ_LOG(sGetUserMediaLog, mozilla::LogLevel::Debug, args)
+
+namespace mozilla {
+
+// statics from AudioInputCubeb
+nsTArray<int>* AudioInputCubeb::mDeviceIndexes;
+int AudioInputCubeb::mDefaultDevice = -1;
+nsTArray<nsCString>* AudioInputCubeb::mDeviceNames;
+cubeb_device_collection* AudioInputCubeb::mDevices = nullptr;
+bool AudioInputCubeb::mAnyInUse = false;
+StaticMutex AudioInputCubeb::sMutex;
+
+// AudioDeviceID is an annoying opaque value that's really a string
+// pointer, and is freed when the cubeb_device_collection is destroyed
+
+// Re-enumerate cubeb input devices and rebuild the name->index mappings.
+// mDeviceIndexes/mDeviceNames only ever grow, so indexes handed out earlier
+// stay valid; a device that disappeared maps to -1 until it reappears.
+// Only the swap of mDevices happens under sMutex.
+void AudioInputCubeb::UpdateDeviceList()
+{
+  cubeb* cubebContext = CubebUtils::GetCubebContext();
+  if (!cubebContext) {
+    // No audio backend available; keep the previous enumeration.
+    return;
+  }
+
+  cubeb_device_collection *devices = nullptr;
+
+  if (CUBEB_OK != cubeb_enumerate_devices(cubebContext,
+                                          CUBEB_DEVICE_TYPE_INPUT,
+                                          &devices)) {
+    return;
+  }
+
+  for (auto& device_index : (*mDeviceIndexes)) {
+    device_index = -1; // unmapped
+  }
+  // We keep all the device names, but wipe the mappings and rebuild them
+
+  // Calculate translation from existing mDevices to new devices. Note we
+  // never end up with less devices than before, since people have
+  // stashed indexes.
+  // For some reason the "fake" device for automation is marked as DISABLED,
+  // so white-list it.
+  mDefaultDevice = -1;
+  for (uint32_t i = 0; i < devices->count; i++) {
+    LOG(("Cubeb device %u: type 0x%x, state 0x%x, name %s, id %p",
+         i, devices->device[i]->type, devices->device[i]->state,
+         devices->device[i]->friendly_name, devices->device[i]->device_id));
+    if (devices->device[i]->type == CUBEB_DEVICE_TYPE_INPUT && // paranoia
+        (devices->device[i]->state == CUBEB_DEVICE_STATE_ENABLED ||
+         (devices->device[i]->state == CUBEB_DEVICE_STATE_DISABLED &&
+          devices->device[i]->friendly_name &&
+          strcmp(devices->device[i]->friendly_name, "Sine source at 440 Hz") == 0)))
+    {
+      // device_id doubles as the stable key into mDeviceNames.
+      auto j = mDeviceNames->IndexOf(devices->device[i]->device_id);
+      if (j != nsTArray<nsCString>::NoIndex) {
+        // match! update the mapping
+        (*mDeviceIndexes)[j] = i;
+      } else {
+        // new device, add to the array
+        mDeviceIndexes->AppendElement(i);
+        mDeviceNames->AppendElement(devices->device[i]->device_id);
+        j = mDeviceIndexes->Length()-1;
+      }
+      if (devices->device[i]->preferred & CUBEB_DEVICE_PREF_VOICE) {
+        // There can be only one... we hope
+        NS_ASSERTION(mDefaultDevice == -1, "multiple default cubeb input devices!");
+        mDefaultDevice = j;
+      }
+    }
+  }
+  LOG(("Cubeb default input device %d", mDefaultDevice));
+  StaticMutexAutoLock lock(sMutex);
+  // swap state
+  if (mDevices) {
+    // Destroying the old collection also frees its device_id strings.
+    cubeb_device_collection_destroy(mDevices);
+  }
+  mDevices = devices;
+}
+
+// Snapshot the audio prefs, probe for tab-capture support via the component
+// registrar, install the global far-end (playback) observer, and register
+// for camera device-change notifications with the Cameras IPC child.
+MediaEngineWebRTC::MediaEngineWebRTC(MediaEnginePrefs &aPrefs)
+  : mMutex("mozilla::MediaEngineWebRTC"),
+    mVoiceEngine(nullptr),
+    mAudioInput(nullptr),
+    mFullDuplex(aPrefs.mFullDuplex),
+    mExtendedFilter(aPrefs.mExtendedFilter),
+    mDelayAgnostic(aPrefs.mDelayAgnostic),
+    mHasTabVideoSource(false)
+{
+  nsCOMPtr<nsIComponentRegistrar> compMgr;
+  NS_GetComponentRegistrar(getter_AddRefs(compMgr));
+  if (compMgr) {
+    // Tab capture is only offered if the tab-source service is registered.
+    compMgr->IsContractIDRegistered(NS_TABSOURCESERVICE_CONTRACTID, &mHasTabVideoSource);
+  }
+  // XXX temporary global (see gFarendObserver in MediaEngineWebRTCAudio.cpp)
+  gFarendObserver = new AudioOutputObserver();
+
+  camera::GetChildAndCall(
+    &camera::CamerasChild::AddDeviceChangeCallback,
+    this);
+}
+
+// Ask the Cameras IPC child to emit synthetic devicechange events
+// (used for testing; forwards directly, no local state touched).
+void
+MediaEngineWebRTC::SetFakeDeviceChangeEvents()
+{
+  camera::GetChildAndCall(
+    &camera::CamerasChild::SetFakeDeviceChangeEvents);
+}
+
+// Enumerate video capture devices for aMediaSource (camera, screen, window,
+// application or browser/tab) and append a MediaEngineVideoSource per device
+// to aVSources. Previously-seen devices are returned from the mVideoSources
+// cache (keyed by unique id) after a Refresh().
+void
+MediaEngineWebRTC::EnumerateVideoDevices(dom::MediaSourceEnum aMediaSource,
+                                         nsTArray<RefPtr<MediaEngineVideoSource> >* aVSources)
+{
+  // We spawn threads to handle gUM runnables, so we must protect the member vars
+  MutexAutoLock lock(mMutex);
+
+  mozilla::camera::CaptureEngine capEngine = mozilla::camera::InvalidEngine;
+
+#ifdef MOZ_WIDGET_ANDROID
+  // get the JVM
+  JavaVM* jvm;
+  JNIEnv* const env = jni::GetEnvForThread();
+  MOZ_ALWAYS_TRUE(!env->GetJavaVM(&jvm));
+
+  if (webrtc::VideoEngine::SetAndroidObjects(jvm) != 0) {
+    LOG(("VieCapture:SetAndroidObjects Failed"));
+    return;
+  }
+#endif
+  bool scaryKind = false; // flag sources with cross-origin exploit potential
+
+  // Map the requested source kind onto the capture engine serving it.
+  switch (aMediaSource) {
+    case dom::MediaSourceEnum::Window:
+      capEngine = mozilla::camera::WinEngine;
+      break;
+    case dom::MediaSourceEnum::Application:
+      capEngine = mozilla::camera::AppEngine;
+      break;
+    case dom::MediaSourceEnum::Screen:
+      capEngine = mozilla::camera::ScreenEngine;
+      scaryKind = true;
+      break;
+    case dom::MediaSourceEnum::Browser:
+      capEngine = mozilla::camera::BrowserEngine;
+      scaryKind = true;
+      break;
+    case dom::MediaSourceEnum::Camera:
+      capEngine = mozilla::camera::CameraEngine;
+      break;
+    default:
+      // BOOM
+      MOZ_CRASH("No valid video engine");
+      break;
+  }
+
+  /**
+   * We still enumerate every time, in case a new device was plugged in since
+   * the last call. TODO: Verify that WebRTC actually does deal with hotplugging
+   * new devices (with or without new engine creation) and accordingly adjust.
+   * Enumeration is not necessary if GIPS reports the same set of devices
+   * for a given instance of the engine. Likewise, if a device was plugged out,
+   * mVideoSources must be updated.
+   */
+  int num;
+  num = mozilla::camera::GetChildAndCall(
+    &mozilla::camera::CamerasChild::NumberOfCaptureDevices,
+    capEngine);
+
+  for (int i = 0; i < num; i++) {
+    char deviceName[MediaEngineSource::kMaxDeviceNameLength];
+    char uniqueId[MediaEngineSource::kMaxUniqueIdLength];
+    bool scarySource = false;
+
+    // paranoia
+    deviceName[0] = '\0';
+    uniqueId[0] = '\0';
+    int error;
+
+    error = mozilla::camera::GetChildAndCall(
+      &mozilla::camera::CamerasChild::GetCaptureDevice,
+      capEngine,
+      i, deviceName,
+      sizeof(deviceName), uniqueId,
+      sizeof(uniqueId),
+      &scarySource);
+    if (error) {
+      LOG(("camera:GetCaptureDevice: Failed %d", error ));
+      continue;
+    }
+#ifdef DEBUG
+    // Debug builds also log every capability the device reports.
+    LOG(("  Capture Device Index %d, Name %s", i, deviceName));
+
+    webrtc::CaptureCapability cap;
+    int numCaps = mozilla::camera::GetChildAndCall(
+      &mozilla::camera::CamerasChild::NumberOfCapabilities,
+      capEngine,
+      uniqueId);
+    LOG(("Number of Capabilities %d", numCaps));
+    for (int j = 0; j < numCaps; j++) {
+      if (mozilla::camera::GetChildAndCall(
+            &mozilla::camera::CamerasChild::GetCaptureCapability,
+            capEngine,
+            uniqueId,
+            j, cap) != 0) {
+       break;
+      }
+      LOG(("type=%d width=%d height=%d maxFPS=%d",
+           cap.rawType, cap.width, cap.height, cap.maxFPS ));
+    }
+#endif
+
+    if (uniqueId[0] == '\0') {
+      // In case a device doesn't set uniqueId!
+      strncpy(uniqueId, deviceName, sizeof(uniqueId));
+      uniqueId[sizeof(uniqueId)-1] = '\0'; // strncpy isn't safe
+    }
+
+    RefPtr<MediaEngineVideoSource> vSource;
+    NS_ConvertUTF8toUTF16 uuid(uniqueId);
+    if (mVideoSources.Get(uuid, getter_AddRefs(vSource))) {
+      // We've already seen this device, just refresh and append.
+      static_cast<MediaEngineRemoteVideoSource*>(vSource.get())->Refresh(i);
+      aVSources->AppendElement(vSource.get());
+    } else {
+      vSource = new MediaEngineRemoteVideoSource(i, capEngine, aMediaSource,
+                                                 scaryKind || scarySource);
+      mVideoSources.Put(uuid, vSource); // Hashtable takes ownership.
+      aVSources->AppendElement(vSource);
+    }
+  }
+
+  // NOTE(review): a tab source is appended whenever mHasTabVideoSource is
+  // set, regardless of aMediaSource — confirm this is intended for
+  // non-Browser enumerations.
+  if (mHasTabVideoSource || dom::MediaSourceEnum::Browser == aMediaSource) {
+    aVSources->AppendElement(new MediaEngineTabVideoSource());
+  }
+}
+
+// Returns whether full-duplex (cubeb) audio capture should be used: the
+// pref must be set, and on Windows the OS must be Vista or later.
+bool
+MediaEngineWebRTC::SupportsDuplex()
+{
+#ifndef XP_WIN
+  return mFullDuplex;
+#else
+  return IsVistaOrLater() && mFullDuplex;
+#endif
+}
+
+// Enumerate audio input devices and append a MediaEngineAudioSource per
+// device to aASources. AudioCapture requests short-circuit to a stub source
+// (MediaManager does the real work). Otherwise this lazily creates the
+// VoiceEngine and the AudioInput backend (cubeb when full-duplex is
+// supported, VoEHardware otherwise), then walks the recording devices,
+// caching sources in mAudioSources by unique id.
+void
+MediaEngineWebRTC::EnumerateAudioDevices(dom::MediaSourceEnum aMediaSource,
+                                         nsTArray<RefPtr<MediaEngineAudioSource> >* aASources)
+{
+  ScopedCustomReleasePtr<webrtc::VoEBase> ptrVoEBase;
+  // We spawn threads to handle gUM runnables, so we must protect the member vars
+  MutexAutoLock lock(mMutex);
+
+  if (aMediaSource == dom::MediaSourceEnum::AudioCapture) {
+    RefPtr<MediaEngineWebRTCAudioCaptureSource> audioCaptureSource =
+      new MediaEngineWebRTCAudioCaptureSource(nullptr);
+    aASources->AppendElement(audioCaptureSource);
+    return;
+  }
+
+#ifdef MOZ_WIDGET_ANDROID
+  jobject context = mozilla::AndroidBridge::Bridge()->GetGlobalContextRef();
+
+  // get the JVM
+  JavaVM* jvm;
+  JNIEnv* const env = jni::GetEnvForThread();
+  MOZ_ALWAYS_TRUE(!env->GetJavaVM(&jvm));
+
+  if (webrtc::VoiceEngine::SetAndroidObjects(jvm, (void*)context) != 0) {
+    LOG(("VoiceEngine:SetAndroidObjects Failed"));
+    return;
+  }
+#endif
+
+  if (!mVoiceEngine) {
+    // Configure echo-cancellation options from the prefs captured in the ctor.
+    mConfig.Set<webrtc::ExtendedFilter>(new webrtc::ExtendedFilter(mExtendedFilter));
+    mConfig.Set<webrtc::DelayAgnostic>(new webrtc::DelayAgnostic(mDelayAgnostic));
+
+    mVoiceEngine = webrtc::VoiceEngine::Create(mConfig);
+    if (!mVoiceEngine) {
+      return;
+    }
+  }
+
+  ptrVoEBase = webrtc::VoEBase::GetInterface(mVoiceEngine);
+  if (!ptrVoEBase) {
+    return;
+  }
+
+  // Always re-init the voice engine, since if we close the last use we
+  // DeInitEngine() and Terminate(), which shuts down Process() - but means
+  // we have to Init() again before using it. Init() when already inited is
+  // just a no-op, so call always.
+  if (ptrVoEBase->Init() < 0) {
+    return;
+  }
+
+  if (!mAudioInput) {
+    if (SupportsDuplex()) {
+      // The platform_supports_full_duplex.
+      mAudioInput = new mozilla::AudioInputCubeb(mVoiceEngine);
+    } else {
+      mAudioInput = new mozilla::AudioInputWebRTC(mVoiceEngine);
+    }
+  }
+
+  int nDevices = 0;
+  mAudioInput->GetNumOfRecordingDevices(nDevices);
+  int i;
+#if defined(MOZ_WIDGET_ANDROID) || defined(MOZ_WIDGET_GONK)
+  i = 0; // Bug 1037025 - let the OS handle defaulting for now on android/b2g
+#else
+  // -1 is "default communications device" depending on OS in webrtc.org code
+  i = -1;
+#endif
+  for (; i < nDevices; i++) {
+    // We use constants here because GetRecordingDeviceName takes char[128].
+    char deviceName[128];
+    char uniqueId[128];
+    // paranoia; jingle doesn't bother with this
+    deviceName[0] = '\0';
+    uniqueId[0] = '\0';
+
+    int error = mAudioInput->GetRecordingDeviceName(i, deviceName, uniqueId);
+    if (error) {
+      LOG((" VoEHardware:GetRecordingDeviceName: Failed %d", error));
+      continue;
+    }
+
+    if (uniqueId[0] == '\0') {
+      // Mac and Linux don't set uniqueId!
+      MOZ_ASSERT(sizeof(deviceName) == sizeof(uniqueId)); // total paranoia
+      strcpy(uniqueId, deviceName); // safe given assert and initialization/error-check
+    }
+
+    RefPtr<MediaEngineAudioSource> aSource;
+    NS_ConvertUTF8toUTF16 uuid(uniqueId);
+    if (mAudioSources.Get(uuid, getter_AddRefs(aSource))) {
+      // We've already seen this device, just append.
+      aASources->AppendElement(aSource.get());
+    } else {
+      AudioInput* audioinput = mAudioInput;
+      if (SupportsDuplex()) {
+        // The platform_supports_full_duplex.
+
+        // For cubeb, it has state (the selected ID)
+        // XXX just use the uniqueID for cubeb and support it everywhere, and get rid of this
+        // XXX Small window where the device list/index could change!
+        audioinput = new mozilla::AudioInputCubeb(mVoiceEngine, i);
+      }
+      aSource = new MediaEngineWebRTCMicrophoneSource(mVoiceEngine, audioinput,
+                                                      i, deviceName, uniqueId);
+      mAudioSources.Put(uuid, aSource); // Hashtable takes ownership.
+      aASources->AppendElement(aSource);
+    }
+  }
+}
+
+// Tear down the engine: unregister the device-change callback, shut down
+// every cached video/audio source, release the VoiceEngine, and clean up
+// the Cameras IPC child and cubeb global state.
+void
+MediaEngineWebRTC::Shutdown()
+{
+  // This is likely paranoia
+  MutexAutoLock lock(mMutex);
+
+  if (camera::GetCamerasChildIfExists()) {
+    camera::GetChildAndCall(
+      &camera::CamerasChild::RemoveDeviceChangeCallback, this);
+  }
+
+  LOG(("%s", __FUNCTION__));
+  // Shutdown all the sources, since we may have dangling references to the
+  // sources in nsDOMUserMediaStreams waiting for GC/CC
+  for (auto iter = mVideoSources.Iter(); !iter.Done(); iter.Next()) {
+    MediaEngineVideoSource* source = iter.UserData();
+    if (source) {
+      source->Shutdown();
+    }
+  }
+  for (auto iter = mAudioSources.Iter(); !iter.Done(); iter.Next()) {
+    MediaEngineAudioSource* source = iter.UserData();
+    if (source) {
+      source->Shutdown();
+    }
+  }
+  mVideoSources.Clear();
+  mAudioSources.Clear();
+
+  if (mVoiceEngine) {
+    mVoiceEngine->SetTraceCallback(nullptr);
+    webrtc::VoiceEngine::Delete(mVoiceEngine);
+  }
+
+  mVoiceEngine = nullptr;
+
+  mozilla::camera::Shutdown();
+  AudioInputCubeb::CleanupGlobalData();
+}
+
+}
diff --git a/dom/media/webrtc/MediaEngineWebRTC.h b/dom/media/webrtc/MediaEngineWebRTC.h
new file mode 100644
index 000000000..1834f3bd3
--- /dev/null
+++ b/dom/media/webrtc/MediaEngineWebRTC.h
@@ -0,0 +1,613 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef MEDIAENGINEWEBRTC_H_
+#define MEDIAENGINEWEBRTC_H_
+
+#include "prcvar.h"
+#include "prthread.h"
+#include "prprf.h"
+#include "nsIThread.h"
+#include "nsIRunnable.h"
+
+#include "mozilla/dom/File.h"
+#include "mozilla/Mutex.h"
+#include "mozilla/StaticMutex.h"
+#include "mozilla/Monitor.h"
+#include "mozilla/UniquePtr.h"
+#include "nsAutoPtr.h"
+#include "nsCOMPtr.h"
+#include "nsThreadUtils.h"
+#include "DOMMediaStream.h"
+#include "nsDirectoryServiceDefs.h"
+#include "nsComponentManagerUtils.h"
+#include "nsRefPtrHashtable.h"
+
+#include "VideoUtils.h"
+#include "MediaEngineCameraVideoSource.h"
+#include "VideoSegment.h"
+#include "AudioSegment.h"
+#include "StreamTracks.h"
+#include "MediaStreamGraph.h"
+#include "cubeb/cubeb.h"
+#include "CubebUtils.h"
+#include "AudioPacketizer.h"
+
+#include "MediaEngineWrapper.h"
+#include "mozilla/dom/MediaStreamTrackBinding.h"
+// WebRTC library includes follow
+#include "webrtc/common.h"
+// Audio Engine
+#include "webrtc/voice_engine/include/voe_base.h"
+#include "webrtc/voice_engine/include/voe_codec.h"
+#include "webrtc/voice_engine/include/voe_hardware.h"
+#include "webrtc/voice_engine/include/voe_network.h"
+#include "webrtc/voice_engine/include/voe_audio_processing.h"
+#include "webrtc/voice_engine/include/voe_volume_control.h"
+#include "webrtc/voice_engine/include/voe_external_media.h"
+#include "webrtc/voice_engine/include/voe_audio_processing.h"
+#include "webrtc/modules/audio_processing/include/audio_processing.h"
+
+// Video Engine
+// conflicts with #include of scoped_ptr.h
+#undef FF
+#include "webrtc/video_engine/include/vie_base.h"
+#include "webrtc/video_engine/include/vie_codec.h"
+#include "webrtc/video_engine/include/vie_render.h"
+#include "webrtc/video_engine/include/vie_capture.h"
+#include "CamerasChild.h"
+
+#include "NullTransport.h"
+#include "AudioOutputObserver.h"
+
+namespace mozilla {
+
+/**
+ * Audio source used for MediaSourceEnum::AudioCapture. Allocation and
+ * deallocation are no-ops here because (per the inline comments) everything
+ * is managed in MediaManager.cpp; most notification hooks are empty stubs.
+ */
+class MediaEngineWebRTCAudioCaptureSource : public MediaEngineAudioSource
+{
+public:
+  NS_DECL_THREADSAFE_ISUPPORTS
+
+  // NOTE(review): aUuid is never used and mUUID is never initialized from
+  // it — confirm GetUUID() (defined in the .cpp) gets its value elsewhere.
+  explicit MediaEngineWebRTCAudioCaptureSource(const char* aUuid)
+    : MediaEngineAudioSource(kReleased)
+  {
+  }
+  void GetName(nsAString& aName) const override;
+  void GetUUID(nsACString& aUUID) const override;
+  nsresult Allocate(const dom::MediaTrackConstraints& aConstraints,
+                    const MediaEnginePrefs& aPrefs,
+                    const nsString& aDeviceId,
+                    const nsACString& aOrigin,
+                    AllocationHandle** aOutHandle,
+                    const char** aOutBadConstraint) override
+  {
+    // Nothing to do here, everything is managed in MediaManager.cpp
+    *aOutHandle = nullptr;
+    return NS_OK;
+  }
+  nsresult Deallocate(AllocationHandle* aHandle) override
+  {
+    // Nothing to do here, everything is managed in MediaManager.cpp
+    MOZ_ASSERT(!aHandle);
+    return NS_OK;
+  }
+  nsresult Start(SourceMediaStream* aMediaStream,
+                 TrackID aId,
+                 const PrincipalHandle& aPrincipalHandle) override;
+  nsresult Stop(SourceMediaStream* aMediaStream, TrackID aId) override;
+  nsresult Restart(AllocationHandle* aHandle,
+                   const dom::MediaTrackConstraints& aConstraints,
+                   const MediaEnginePrefs &aPrefs,
+                   const nsString& aDeviceId,
+                   const char** aOutBadConstraint) override;
+  // The remaining listener/notification hooks are intentionally empty for
+  // this source type.
+  void SetDirectListeners(bool aDirect) override
+  {}
+  void NotifyOutputData(MediaStreamGraph* aGraph,
+                        AudioDataValue* aBuffer, size_t aFrames,
+                        TrackRate aRate, uint32_t aChannels) override
+  {}
+  void DeviceChanged() override
+  {}
+  void NotifyInputData(MediaStreamGraph* aGraph,
+                       const AudioDataValue* aBuffer, size_t aFrames,
+                       TrackRate aRate, uint32_t aChannels) override
+  {}
+  void NotifyPull(MediaStreamGraph* aGraph,
+                  SourceMediaStream* aSource,
+                  TrackID aID,
+                  StreamTime aDesiredTime,
+                  const PrincipalHandle& aPrincipalHandle) override
+  {}
+  dom::MediaSourceEnum GetMediaSource() const override
+  {
+    return dom::MediaSourceEnum::AudioCapture;
+  }
+  bool IsFake() override
+  {
+    return false;
+  }
+  nsresult TakePhoto(MediaEnginePhotoCallback* aCallback) override
+  {
+    return NS_ERROR_NOT_IMPLEMENTED;
+  }
+  uint32_t GetBestFitnessDistance(
+    const nsTArray<const NormalizedConstraintSet*>& aConstraintSets,
+    const nsString& aDeviceId) const override;
+
+protected:
+  virtual ~MediaEngineWebRTCAudioCaptureSource() {}
+  nsCString mUUID;
+};
+
+// Small subset of VoEHardware.
+// Abstract interface over an audio-input backend; implemented below by
+// AudioInputCubeb (full-duplex/cubeb) and AudioInputWebRTC (VoEHardware).
+class AudioInput
+{
+public:
+  explicit AudioInput(webrtc::VoiceEngine* aVoiceEngine) : mVoiceEngine(aVoiceEngine) {};
+  // Threadsafe because it's referenced from a MicrophoneSource, which can
+  // hold references to it on other threads.
+  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AudioInput)
+
+  virtual int GetNumOfRecordingDevices(int& aDevices) = 0;
+  virtual int GetRecordingDeviceName(int aIndex, char aStrNameUTF8[128],
+                                     char aStrGuidUTF8[128]) = 0;
+  virtual int GetRecordingDeviceStatus(bool& aIsAvailable) = 0;
+  virtual void StartRecording(SourceMediaStream *aStream, AudioDataListener *aListener) = 0;
+  virtual void StopRecording(SourceMediaStream *aStream) = 0;
+  virtual int SetRecordingDevice(int aIndex) = 0;
+
+protected:
+  // Protected destructor, to discourage deletion outside of Release():
+  virtual ~AudioInput() {}
+
+  webrtc::VoiceEngine* mVoiceEngine;
+};
+
+// AudioInput backed by cubeb device enumeration. The static members hold a
+// single process-wide device list shared by all instances; it is rebuilt by
+// UpdateDeviceList() (MediaEngineWebRTC.cpp) and torn down by
+// CleanupGlobalData().
+class AudioInputCubeb final : public AudioInput
+{
+public:
+  explicit AudioInputCubeb(webrtc::VoiceEngine* aVoiceEngine, int aIndex = 0) :
+    AudioInput(aVoiceEngine), mSelectedDevice(aIndex), mInUseCount(0)
+  {
+    if (!mDeviceIndexes) {
+      // First instance: lazily create the shared arrays (heap pointers are
+      // used to avoid static constructors).
+      mDeviceIndexes = new nsTArray<int>;
+      mDeviceNames = new nsTArray<nsCString>;
+      mDefaultDevice = -1;
+    }
+  }
+
+  static void CleanupGlobalData()
+  {
+    if (mDevices) {
+      // This doesn't require anything more than support for free()
+      cubeb_device_collection_destroy(mDevices);
+      mDevices = nullptr;
+    }
+    delete mDeviceIndexes;
+    mDeviceIndexes = nullptr;
+    delete mDeviceNames;
+    mDeviceNames = nullptr;
+  }
+
+  int GetNumOfRecordingDevices(int& aDevices)
+  {
+    // Re-enumerates on every call so hotplugged devices show up.
+    UpdateDeviceList();
+    aDevices = mDeviceIndexes->Length();
+    return 0;
+  }
+
+  // Translate a stable, externally-visible index (-1 = system default) to
+  // an index into the current mDevices collection, or -1 if unmapped.
+  static int32_t DeviceIndex(int aIndex)
+  {
+    // -1 = system default if any
+    if (aIndex == -1) {
+      if (mDefaultDevice == -1) {
+        aIndex = 0;
+      } else {
+        aIndex = mDefaultDevice;
+      }
+    }
+    if (aIndex < 0 || aIndex >= (int) mDeviceIndexes->Length()) {
+      return -1;
+    }
+    // Note: if the device is gone, this will be -1
+    return (*mDeviceIndexes)[aIndex]; // translate to mDevices index
+  }
+
+  static StaticMutex& Mutex()
+  {
+    return sMutex;
+  }
+
+  // Look up the cubeb device id for a stable index; caller must hold sMutex.
+  static bool GetDeviceID(int aDeviceIndex, CubebUtils::AudioDeviceID &aID)
+  {
+    // Assert sMutex is held
+    sMutex.AssertCurrentThreadOwns();
+    int dev_index = DeviceIndex(aDeviceIndex);
+    if (dev_index != -1) {
+      aID = mDevices->device[dev_index]->devid;
+      return true;
+    }
+    return false;
+  }
+
+  int GetRecordingDeviceName(int aIndex, char aStrNameUTF8[128],
+                             char aStrGuidUTF8[128])
+  {
+    int32_t devindex = DeviceIndex(aIndex);
+    if (!mDevices || devindex < 0) {
+      return 1;
+    }
+    PR_snprintf(aStrNameUTF8, 128, "%s%s", aIndex == -1 ? "default: " : "",
+                mDevices->device[devindex]->friendly_name);
+    aStrGuidUTF8[0] = '\0';
+    return 0;
+  }
+
+  int GetRecordingDeviceStatus(bool& aIsAvailable)
+  {
+    // With cubeb, we only expose devices of type CUBEB_DEVICE_TYPE_INPUT,
+    // so unless it was removed, say it's available
+    aIsAvailable = true;
+    return 0;
+  }
+
+  // Open the stream's audio input on our selected device; the first active
+  // recorder also flips VoE to external-recording mode and sets mAnyInUse.
+  void StartRecording(SourceMediaStream *aStream, AudioDataListener *aListener)
+  {
+    MOZ_ASSERT(mDevices);
+
+    if (mInUseCount == 0) {
+      ScopedCustomReleasePtr<webrtc::VoEExternalMedia> ptrVoERender;
+      ptrVoERender = webrtc::VoEExternalMedia::GetInterface(mVoiceEngine);
+      if (ptrVoERender) {
+        ptrVoERender->SetExternalRecordingStatus(true);
+      }
+      mAnyInUse = true;
+    }
+    mInUseCount++;
+    // Always tell the stream we're using it for input
+    aStream->OpenAudioInput(mSelectedDevice, aListener);
+  }
+
+  void StopRecording(SourceMediaStream *aStream)
+  {
+    aStream->CloseAudioInput();
+    if (--mInUseCount == 0) {
+      mAnyInUse = false;
+    }
+  }
+
+  int SetRecordingDevice(int aIndex)
+  {
+    mSelectedDevice = aIndex;
+    return 0;
+  }
+
+protected:
+  ~AudioInputCubeb() {
+    MOZ_RELEASE_ASSERT(mInUseCount == 0);
+  }
+
+private:
+  // It would be better to watch for device-change notifications
+  void UpdateDeviceList();
+
+  // We have an array, which consists of indexes to the current mDevices
+  // list. This is updated on mDevices updates. Many devices in mDevices
+  // won't be included in the array (wrong type, etc), or if a device is
+  // removed it will map to -1 (and opens of this device will need to check
+  // for this - and be careful of threading access. The mappings need to
+  // updated on each re-enumeration.
+  int mSelectedDevice;
+  uint32_t mInUseCount;
+
+  // pointers to avoid static constructors
+  static nsTArray<int>* mDeviceIndexes;
+  static int mDefaultDevice; // -1 == not set
+  static nsTArray<nsCString>* mDeviceNames;
+  static cubeb_device_collection *mDevices;
+  static bool mAnyInUse;
+  static StaticMutex sMutex;
+};
+
+// AudioInput backed by webrtc.org's VoEHardware interface; used when the
+// platform does not support full-duplex (see SupportsDuplex()). Start/Stop
+// recording are no-ops here — capture is driven by the voice engine itself.
+class AudioInputWebRTC final : public AudioInput
+{
+public:
+  explicit AudioInputWebRTC(webrtc::VoiceEngine* aVoiceEngine) : AudioInput(aVoiceEngine) {}
+
+  int GetNumOfRecordingDevices(int& aDevices)
+  {
+    ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw;
+    ptrVoEHw = webrtc::VoEHardware::GetInterface(mVoiceEngine);
+    if (!ptrVoEHw)  {
+      return 1;
+    }
+    return ptrVoEHw->GetNumOfRecordingDevices(aDevices);
+  }
+
+  int GetRecordingDeviceName(int aIndex, char aStrNameUTF8[128],
+                             char aStrGuidUTF8[128])
+  {
+    ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw;
+    ptrVoEHw = webrtc::VoEHardware::GetInterface(mVoiceEngine);
+    if (!ptrVoEHw)  {
+      return 1;
+    }
+    return ptrVoEHw->GetRecordingDeviceName(aIndex, aStrNameUTF8,
+                                            aStrGuidUTF8);
+  }
+
+  int GetRecordingDeviceStatus(bool& aIsAvailable)
+  {
+    ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw;
+    ptrVoEHw = webrtc::VoEHardware::GetInterface(mVoiceEngine);
+    if (!ptrVoEHw)  {
+      return 1;
+    }
+    ptrVoEHw->GetRecordingDeviceStatus(aIsAvailable);
+    return 0;
+  }
+
+  void StartRecording(SourceMediaStream *aStream, AudioDataListener *aListener) {}
+  void StopRecording(SourceMediaStream *aStream) {}
+
+  int SetRecordingDevice(int aIndex)
+  {
+    ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw;
+    ptrVoEHw = webrtc::VoEHardware::GetInterface(mVoiceEngine);
+    if (!ptrVoEHw)  {
+      return 1;
+    }
+    return ptrVoEHw->SetRecordingDevice(aIndex);
+  }
+
+protected:
+  // Protected destructor, to discourage deletion outside of Release():
+  ~AudioInputWebRTC() {}
+};
+
+// Mutex-guarded forwarding shim between the MediaStreamGraph's audio
+// callbacks and a MediaEngineAudioSource. Shutdown() severs the link so
+// callbacks arriving afterwards are silently dropped.
+class WebRTCAudioDataListener : public AudioDataListener
+{
+protected:
+  // Protected destructor, to discourage deletion outside of Release():
+  virtual ~WebRTCAudioDataListener() {}
+
+public:
+  explicit WebRTCAudioDataListener(MediaEngineAudioSource* aAudioSource)
+    : mMutex("WebRTCAudioDataListener")
+    , mAudioSource(aAudioSource)
+  {}
+
+  // AudioDataListenerInterface methods
+  virtual void NotifyOutputData(MediaStreamGraph* aGraph,
+                                AudioDataValue* aBuffer, size_t aFrames,
+                                TrackRate aRate, uint32_t aChannels) override
+  {
+    MutexAutoLock lock(mMutex);
+    if (mAudioSource) {
+      mAudioSource->NotifyOutputData(aGraph, aBuffer, aFrames, aRate, aChannels);
+    }
+  }
+  virtual void NotifyInputData(MediaStreamGraph* aGraph,
+                               const AudioDataValue* aBuffer, size_t aFrames,
+                               TrackRate aRate, uint32_t aChannels) override
+  {
+    MutexAutoLock lock(mMutex);
+    if (mAudioSource) {
+      mAudioSource->NotifyInputData(aGraph, aBuffer, aFrames, aRate, aChannels);
+    }
+  }
+  virtual void DeviceChanged() override
+  {
+    MutexAutoLock lock(mMutex);
+    if (mAudioSource) {
+      mAudioSource->DeviceChanged();
+    }
+  }
+
+  // Detach from the source; subsequent callbacks become no-ops.
+  void Shutdown()
+  {
+    MutexAutoLock lock(mMutex);
+    mAudioSource = nullptr;
+  }
+
+private:
+  Mutex mMutex;
+  RefPtr<MediaEngineAudioSource> mAudioSource;
+};
+
+/**
+ * Microphone capture source. Bridges an AudioInput backend and webrtc.org's
+ * VoiceEngine (as a VoEMediaProcess) into one MediaEngineAudioSource. The
+ * static VoE* interface pointers are shared by all microphone sources so the
+ * engine is only terminated once the last capture ends (see sChannelsOpen).
+ */
+class MediaEngineWebRTCMicrophoneSource : public MediaEngineAudioSource,
+                                          public webrtc::VoEMediaProcess
+{
+  typedef MediaEngineAudioSource Super;
+public:
+  MediaEngineWebRTCMicrophoneSource(webrtc::VoiceEngine* aVoiceEnginePtr,
+                                    mozilla::AudioInput* aAudioInput,
+                                    int aIndex,
+                                    const char* name,
+                                    const char* uuid);
+
+  void GetName(nsAString& aName) const override;
+  void GetUUID(nsACString& aUUID) const override;
+
+  nsresult Deallocate(AllocationHandle* aHandle) override;
+  nsresult Start(SourceMediaStream* aStream,
+                 TrackID aID,
+                 const PrincipalHandle& aPrincipalHandle) override;
+  nsresult Stop(SourceMediaStream* aSource, TrackID aID) override;
+  nsresult Restart(AllocationHandle* aHandle,
+                   const dom::MediaTrackConstraints& aConstraints,
+                   const MediaEnginePrefs &aPrefs,
+                   const nsString& aDeviceId,
+                   const char** aOutBadConstraint) override;
+  void SetDirectListeners(bool aHasDirectListeners) override {};
+
+  void NotifyPull(MediaStreamGraph* aGraph,
+                  SourceMediaStream* aSource,
+                  TrackID aId,
+                  StreamTime aDesiredTime,
+                  const PrincipalHandle& aPrincipalHandle) override;
+
+  // AudioDataListenerInterface methods
+  void NotifyOutputData(MediaStreamGraph* aGraph,
+                        AudioDataValue* aBuffer, size_t aFrames,
+                        TrackRate aRate, uint32_t aChannels) override;
+  void NotifyInputData(MediaStreamGraph* aGraph,
+                       const AudioDataValue* aBuffer, size_t aFrames,
+                       TrackRate aRate, uint32_t aChannels) override;
+
+  void DeviceChanged() override;
+
+  bool IsFake() override {
+    return false;
+  }
+
+  dom::MediaSourceEnum GetMediaSource() const override {
+    return dom::MediaSourceEnum::Microphone;
+  }
+
+  nsresult TakePhoto(MediaEnginePhotoCallback* aCallback) override
+  {
+    return NS_ERROR_NOT_IMPLEMENTED;
+  }
+
+  uint32_t GetBestFitnessDistance(
+      const nsTArray<const NormalizedConstraintSet*>& aConstraintSets,
+      const nsString& aDeviceId) const override;
+
+  // VoEMediaProcess.
+  void Process(int channel, webrtc::ProcessingTypes type,
+               int16_t audio10ms[], int length,
+               int samplingFreq, bool isStereo) override;
+
+  void Shutdown() override;
+
+  NS_DECL_THREADSAFE_ISUPPORTS
+
+protected:
+  ~MediaEngineWebRTCMicrophoneSource() {}
+
+private:
+  nsresult
+  UpdateSingleSource(const AllocationHandle* aHandle,
+                     const NormalizedConstraints& aNetConstraints,
+                     const MediaEnginePrefs& aPrefs,
+                     const nsString& aDeviceId,
+                     const char** aOutBadConstraint) override;
+
+  void SetLastPrefs(const MediaEnginePrefs& aPrefs);
+
+  // These allocate/configure and release the channel
+  bool AllocChannel();
+  void FreeChannel();
+  // These start/stop VoEBase and associated interfaces
+  bool InitEngine();
+  void DeInitEngine();
+
+  // This is true when all processing is disabled, we can skip
+  // packetization, resampling and other processing passes.
+  bool PassThrough() {
+    return mSkipProcessing;
+  }
+  template<typename T>
+  void InsertInGraph(const T* aBuffer,
+                     size_t aFrames,
+                     uint32_t aChannels);
+
+  void PacketizeAndProcess(MediaStreamGraph* aGraph,
+                           const AudioDataValue* aBuffer,
+                           size_t aFrames,
+                           TrackRate aRate,
+                           uint32_t aChannels);
+
+  webrtc::VoiceEngine* mVoiceEngine;
+  RefPtr<mozilla::AudioInput> mAudioInput;
+  RefPtr<WebRTCAudioDataListener> mListener;
+
+  // Note: shared across all microphone sources - we don't want to Terminate()
+  // the VoEBase until there are no active captures
+  static int sChannelsOpen;
+  static ScopedCustomReleasePtr<webrtc::VoEBase> mVoEBase;
+  static ScopedCustomReleasePtr<webrtc::VoEExternalMedia> mVoERender;
+  static ScopedCustomReleasePtr<webrtc::VoENetwork> mVoENetwork;
+  static ScopedCustomReleasePtr<webrtc::VoEAudioProcessing> mVoEProcessing;
+
+  // accessed from the GraphDriver thread except for deletion
+  nsAutoPtr<AudioPacketizer<AudioDataValue, int16_t>> mPacketizer;
+  ScopedCustomReleasePtr<webrtc::VoEExternalMedia> mVoERenderListener;
+
+  // mMonitor protects mSources[] and mPrinicpalIds[] access/changes, and
+  // transitions of mState from kStarted to kStopped (which are combined with
+  // EndTrack()). mSources[] and mPrincipalHandles[] are accessed from webrtc
+  // threads.
+  Monitor mMonitor;
+  nsTArray<RefPtr<SourceMediaStream>> mSources;
+  nsTArray<PrincipalHandle> mPrincipalHandles; // Maps to mSources.
+
+  int mCapIndex;
+  int mChannel;
+  MOZ_INIT_OUTSIDE_CTOR TrackID mTrackID;
+  bool mStarted;
+
+  nsString mDeviceName;
+  nsCString mDeviceUUID;
+
+  int32_t mSampleFrequency;
+  int32_t mPlayoutDelay;
+
+  NullTransport *mNullTransport;
+
+  nsTArray<int16_t> mInputBuffer;
+  // mSkipProcessing is true if none of the processing passes are enabled,
+  // because of prefs or constraints. This allows simply copying the audio into
+  // the MSG, skipping resampling and the whole webrtc.org code.
+  bool mSkipProcessing;
+
+  // To only update microphone when needed, we keep track of previous settings.
+  MediaEnginePrefs mLastPrefs;
+};
+
+/**
+ * Top-level media engine for WebRTC capture. Enumerates video devices via
+ * the Cameras IPC child and audio devices via an AudioInput backend, caching
+ * created sources per unique device id. All enumeration is guarded by
+ * mMutex (gUM runnables may arrive on multiple threads).
+ */
+class MediaEngineWebRTC : public MediaEngine
+{
+  typedef MediaEngine Super;
+public:
+  explicit MediaEngineWebRTC(MediaEnginePrefs& aPrefs);
+
+  virtual void SetFakeDeviceChangeEvents() override;
+
+  // Clients should ensure to clean-up sources video/audio sources
+  // before invoking Shutdown on this class.
+  void Shutdown() override;
+
+  // Returns whether the host supports duplex audio stream.
+  bool SupportsDuplex();
+
+  void EnumerateVideoDevices(dom::MediaSourceEnum,
+                             nsTArray<RefPtr<MediaEngineVideoSource>>*) override;
+  void EnumerateAudioDevices(dom::MediaSourceEnum,
+                             nsTArray<RefPtr<MediaEngineAudioSource>>*) override;
+private:
+  ~MediaEngineWebRTC() {
+    // Drop the global far-end observer installed by the constructor.
+    gFarendObserver = nullptr;
+  }
+
+  nsCOMPtr<nsIThread> mThread;
+
+  // gUM runnables can e.g. Enumerate from multiple threads
+  Mutex mMutex;
+  webrtc::VoiceEngine* mVoiceEngine;
+  webrtc::Config mConfig;
+  RefPtr<mozilla::AudioInput> mAudioInput;
+  bool mFullDuplex;
+  bool mExtendedFilter;
+  bool mDelayAgnostic;
+  bool mHasTabVideoSource;
+
+  // Store devices we've already seen in a hashtable for quick return.
+  // Maps UUID to MediaEngineSource (one set for audio, one for video).
+  nsRefPtrHashtable<nsStringHashKey, MediaEngineVideoSource> mVideoSources;
+  nsRefPtrHashtable<nsStringHashKey, MediaEngineAudioSource> mAudioSources;
+};
+
+}
+
+#endif /* MEDIAENGINEWEBRTC_H_ */
diff --git a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
new file mode 100644
index 000000000..0b8796aa8
--- /dev/null
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -0,0 +1,937 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "MediaEngineWebRTC.h"
+#include <stdio.h>
+#include <algorithm>
+#include "mozilla/Assertions.h"
+#include "MediaTrackConstraints.h"
+#include "mtransport/runnable_utils.h"
+#include "nsAutoPtr.h"
+
+// scoped_ptr.h uses FF
+#ifdef FF
+#undef FF
+#endif
+#include "webrtc/modules/audio_device/opensl/single_rw_fifo.h"
+
+#define CHANNELS 1
+#define ENCODING "L16"
+#define DEFAULT_PORT 5555
+
+#define SAMPLE_RATE(freq) ((freq)*2*8) // bps, 16-bit samples
+#define SAMPLE_LENGTH(freq) (((freq)*10)/1000)
+
+// These are restrictions from the webrtc.org code
+#define MAX_CHANNELS 2
+#define MAX_SAMPLING_FREQ 48000 // Hz - multiple of 100
+
+#define MAX_AEC_FIFO_DEPTH 200 // ms - multiple of 10
+static_assert(!(MAX_AEC_FIFO_DEPTH % 10), "Invalid MAX_AEC_FIFO_DEPTH");
+
+namespace mozilla {
+
+#ifdef LOG
+#undef LOG
+#endif
+
+extern LogModule* GetMediaManagerLog();
+#define LOG(msg) MOZ_LOG(GetMediaManagerLog(), mozilla::LogLevel::Debug, msg)
+#define LOG_FRAMES(msg) MOZ_LOG(GetMediaManagerLog(), mozilla::LogLevel::Verbose, msg)
+
+/**
+ * Webrtc microphone source source.
+ */
+NS_IMPL_ISUPPORTS0(MediaEngineWebRTCMicrophoneSource)
+NS_IMPL_ISUPPORTS0(MediaEngineWebRTCAudioCaptureSource)
+
+// XXX temp until MSG supports registration
+StaticRefPtr<AudioOutputObserver> gFarendObserver;
+
+int MediaEngineWebRTCMicrophoneSource::sChannelsOpen = 0;
+ScopedCustomReleasePtr<webrtc::VoEBase> MediaEngineWebRTCMicrophoneSource::mVoEBase;
+ScopedCustomReleasePtr<webrtc::VoEExternalMedia> MediaEngineWebRTCMicrophoneSource::mVoERender;
+ScopedCustomReleasePtr<webrtc::VoENetwork> MediaEngineWebRTCMicrophoneSource::mVoENetwork;
+ScopedCustomReleasePtr<webrtc::VoEAudioProcessing> MediaEngineWebRTCMicrophoneSource::mVoEProcessing;
+
+// Observes mixed speaker (far-end) output so it can be fed to the AEC.
+// Playout rate/channel count start at 0 and are latched by InsertFarEnd().
+AudioOutputObserver::AudioOutputObserver()
+  : mPlayoutFreq(0)
+  , mPlayoutChannels(0)
+  , mChunkSize(0)
+  , mSaved(nullptr)
+  , mSamplesSaved(0)
+{
+  // Buffers of 10ms chunks
+  mPlayoutFifo = new webrtc::SingleRwFifo(MAX_AEC_FIFO_DEPTH/10);
+}
+
+AudioOutputObserver::~AudioOutputObserver()
+{
+  // Drain all queued chunks, then release the partially-filled one.
+  Clear();
+  free(mSaved);
+  mSaved = nullptr;
+}
+
+// Frees every complete 10ms chunk queued for the AEC.
+void
+AudioOutputObserver::Clear()
+{
+  while (mPlayoutFifo->size() > 0) {
+    free(mPlayoutFifo->Pop());
+  }
+  // we'd like to touch mSaved here, but we can't if we might still be getting callbacks
+}
+
+// Transfers ownership of one queued 10ms chunk to the caller, which must
+// free() it. Only call when Size() > 0.
+FarEndAudioChunk *
+AudioOutputObserver::Pop()
+{
+  return (FarEndAudioChunk *) mPlayoutFifo->Pop();
+}
+
+// Number of complete 10ms chunks currently queued.
+uint32_t
+AudioOutputObserver::Size()
+{
+  return mPlayoutFifo->size();
+}
+
+// MSG mixer callback: forwards the mixed speaker data to the global
+// far-end observer (if any) as AEC reference input.
+void
+AudioOutputObserver::MixerCallback(AudioDataValue* aMixedBuffer,
+                                   AudioSampleFormat aFormat,
+                                   uint32_t aChannels,
+                                   uint32_t aFrames,
+                                   uint32_t aSampleRate)
+{
+  if (gFarendObserver) {
+    gFarendObserver->InsertFarEnd(aMixedBuffer, aFrames, false,
+                                  aSampleRate, aChannels, aFormat);
+  }
+}
+
+// Rechunks far-end (speaker) audio into 10ms FarEndAudioChunk buffers and
+// queues them for the AEC. Channel count and sample rate are latched on the
+// first call and must never change afterwards (MOZ_CRASH if they do).
+// Note: not a static method -- it mutates mSaved/mSamplesSaved (the previous
+// "// static" comment here was wrong).
+void
+AudioOutputObserver::InsertFarEnd(const AudioDataValue *aBuffer, uint32_t aFrames, bool aOverran,
+                                  int aFreq, int aChannels, AudioSampleFormat aFormat)
+{
+  if (mPlayoutChannels != 0) {
+    if (mPlayoutChannels != static_cast<uint32_t>(aChannels)) {
+      MOZ_CRASH();
+    }
+  } else {
+    MOZ_ASSERT(aChannels <= MAX_CHANNELS);
+    mPlayoutChannels = static_cast<uint32_t>(aChannels);
+  }
+  if (mPlayoutFreq != 0) {
+    if (mPlayoutFreq != static_cast<uint32_t>(aFreq)) {
+      MOZ_CRASH();
+    }
+  } else {
+    MOZ_ASSERT(aFreq <= MAX_SAMPLING_FREQ);
+    MOZ_ASSERT(!(aFreq % 100), "Sampling rate for far end data should be multiple of 100.");
+    mPlayoutFreq = aFreq;
+    mChunkSize = aFreq/100; // 10ms
+  }
+
+#ifdef LOG_FAREND_INSERTION
+  static FILE *fp = fopen("insertfarend.pcm","wb");
+#endif
+
+  if (mSaved) {
+    // flag overrun as soon as possible, and only once
+    mSaved->mOverrun = aOverran;
+    aOverran = false;
+  }
+  // Rechunk to 10ms.
+  // The AnalyzeReverseStream() and WebRtcAec_BufferFarend() functions insist on 10ms
+  // samples per call. Annoying...
+  while (aFrames) {
+    if (!mSaved) {
+      // FarEndAudioChunk declares mData[1], hence the "- 1" in the size.
+      mSaved = (FarEndAudioChunk *) moz_xmalloc(sizeof(FarEndAudioChunk) +
+                                               (mChunkSize * aChannels - 1)*sizeof(int16_t));
+      mSaved->mSamples = mChunkSize;
+      mSaved->mOverrun = aOverran;
+      aOverran = false;
+    }
+    // Fill the current chunk as far as this input allows.
+    uint32_t to_copy = mChunkSize - mSamplesSaved;
+    if (to_copy > aFrames) {
+      to_copy = aFrames;
+    }
+
+    int16_t *dest = &(mSaved->mData[mSamplesSaved * aChannels]);
+    ConvertAudioSamples(aBuffer, dest, to_copy * aChannels);
+
+#ifdef LOG_FAREND_INSERTION
+    if (fp) {
+      fwrite(&(mSaved->mData[mSamplesSaved * aChannels]), to_copy * aChannels, sizeof(int16_t), fp);
+    }
+#endif
+    aFrames -= to_copy;
+    mSamplesSaved += to_copy;
+    aBuffer += to_copy * aChannels;
+
+    if (mSamplesSaved >= mChunkSize) {
+      int free_slots = mPlayoutFifo->capacity() - mPlayoutFifo->size();
+      if (free_slots <= 0) {
+        // XXX We should flag an overrun for the reader.  We can't drop data from it due to
+        // thread safety issues.
+        break;
+      } else {
+        mPlayoutFifo->Push((int8_t *) mSaved); // takes ownership
+        mSaved = nullptr;
+        mSamplesSaved = 0;
+      }
+    }
+  }
+}
+
+// Microphone source wrapping one webrtc.org capture device (aIndex).
+// The VoiceEngine itself is initialized lazily on first allocation.
+MediaEngineWebRTCMicrophoneSource::MediaEngineWebRTCMicrophoneSource(
+    webrtc::VoiceEngine* aVoiceEnginePtr,
+    mozilla::AudioInput* aAudioInput,
+    int aIndex,
+    const char* name,
+    const char* uuid)
+  : MediaEngineAudioSource(kReleased)
+  , mVoiceEngine(aVoiceEnginePtr)
+  , mAudioInput(aAudioInput)
+  , mMonitor("WebRTCMic.Monitor")
+  , mCapIndex(aIndex)
+  , mChannel(-1)
+  , mTrackID(TRACK_NONE)
+  , mStarted(false)
+  , mSampleFrequency(MediaEngine::DEFAULT_SAMPLE_RATE)
+  , mPlayoutDelay(0)
+  , mNullTransport(nullptr)
+  , mSkipProcessing(false)
+{
+  MOZ_ASSERT(aVoiceEnginePtr);
+  MOZ_ASSERT(aAudioInput);
+  mDeviceName.Assign(NS_ConvertUTF8toUTF16(name));
+  mDeviceUUID.Assign(uuid);
+  mListener = new mozilla::WebRTCAudioDataListener(this);
+  // Seed reported settings; updated for real in SetLastPrefs().
+  mSettings.mEchoCancellation.Construct(0);
+  mSettings.mMozAutoGainControl.Construct(0);
+  mSettings.mMozNoiseSuppression.Construct(0);
+  // We'll init lazily as needed
+}
+
+// Reports the human-readable device name captured at construction.
+void
+MediaEngineWebRTCMicrophoneSource::GetName(nsAString& aName) const
+{
+  aName.Assign(mDeviceName);
+}
+
+// Reports the stable device UUID captured at construction.
+void
+MediaEngineWebRTCMicrophoneSource::GetUUID(nsACString& aUUID) const
+{
+  aUUID.Assign(mDeviceUUID);
+}
+
+// GetBestFitnessDistance returns the best distance the capture device can offer
+// as a whole, given an accumulated number of ConstraintSets.
+// Ideal values are considered in the first ConstraintSet only.
+// Plain values are treated as Ideal in the first ConstraintSet.
+// Plain values are treated as Exact in subsequent ConstraintSets.
+// Infinity = UINT32_MAX e.g. device cannot satisfy accumulated ConstraintSets.
+// A finite result may be used to calculate this device's ranking as a choice.
+
+uint32_t MediaEngineWebRTCMicrophoneSource::GetBestFitnessDistance(
+    const nsTArray<const NormalizedConstraintSet*>& aConstraintSets,
+    const nsString& aDeviceId) const
+{
+  uint32_t distance = 0;
+
+  // Only the first (required) set contributes, per the comment above.
+  for (const auto* cs : aConstraintSets) {
+    distance = GetMinimumFitnessDistance(*cs, aDeviceId);
+    break; // distance is read from first entry only
+  }
+  return distance;
+}
+
+// Re-applies constraints/prefs to an existing allocation by re-running the
+// shared allocation logic (which ends up in UpdateSingleSource()).
+nsresult
+MediaEngineWebRTCMicrophoneSource::Restart(AllocationHandle* aHandle,
+                                           const dom::MediaTrackConstraints& aConstraints,
+                                           const MediaEnginePrefs &aPrefs,
+                                           const nsString& aDeviceId,
+                                           const char** aOutBadConstraint)
+{
+  AssertIsOnOwningThread();
+  MOZ_ASSERT(aHandle);
+  NormalizedConstraints constraints(aConstraints);
+  return ReevaluateAllocation(aHandle, &constraints, aPrefs, aDeviceId,
+                              aOutBadConstraint);
+}
+
+// Bytewise comparison of two pref snapshots, used to skip redundant updates.
+// NOTE(review): memcmp also compares any padding bytes in MediaEnginePrefs,
+// so this is only reliable if instances are zero-initialized before being
+// filled in -- confirm at the call sites.
+bool operator == (const MediaEnginePrefs& a, const MediaEnginePrefs& b)
+{
+  return !memcmp(&a, &b, sizeof(MediaEnginePrefs));
+}
+
+// Applies the net (flattened) constraints and prefs to the single shared
+// microphone source: allocates the VoE channel on first use, and (re)applies
+// AEC/AGC/NS processing settings. Returns NS_ERROR_FAILURE on engine/device
+// failure or when a second distinct device is requested (bug 1238038).
+nsresult
+MediaEngineWebRTCMicrophoneSource::UpdateSingleSource(
+    const AllocationHandle* aHandle,
+    const NormalizedConstraints& aNetConstraints,
+    const MediaEnginePrefs& aPrefs,
+    const nsString& aDeviceId,
+    const char** aOutBadConstraint)
+{
+  FlattenedConstraints c(aNetConstraints);
+
+  // Constraints override the pref defaults when present.
+  MediaEnginePrefs prefs = aPrefs;
+  prefs.mAecOn = c.mEchoCancellation.Get(prefs.mAecOn);
+  prefs.mAgcOn = c.mMozAutoGainControl.Get(prefs.mAgcOn);
+  prefs.mNoiseOn = c.mMozNoiseSuppression.Get(prefs.mNoiseOn);
+
+  LOG(("Audio config: aec: %d, agc: %d, noise: %d, delay: %d",
+       prefs.mAecOn ? prefs.mAec : -1,
+       prefs.mAgcOn ? prefs.mAgc : -1,
+       prefs.mNoiseOn ? prefs.mNoise : -1,
+       prefs.mPlayoutDelay));
+
+  mPlayoutDelay = prefs.mPlayoutDelay;
+
+  switch (mState) {
+    case kReleased:
+      MOZ_ASSERT(aHandle);
+      if (sChannelsOpen == 0) {
+        if (!InitEngine()) {
+          LOG(("Audio engine is not initalized"));
+          return NS_ERROR_FAILURE;
+        }
+      } else {
+        // Until we fix (or wallpaper) support for multiple mic input
+        // (Bug 1238038) fail allocation for a second device
+        return NS_ERROR_FAILURE;
+      }
+      if (!AllocChannel()) {
+        LOG(("Audio device is not initalized"));
+        return NS_ERROR_FAILURE;
+      }
+      // webrtc.org APIs return 0 on success; non-zero here means failure.
+      if (mAudioInput->SetRecordingDevice(mCapIndex)) {
+        FreeChannel();
+        return NS_ERROR_FAILURE;
+      }
+      LOG(("Audio device %d allocated", mCapIndex));
+      break;
+
+    case kStarted:
+      if (prefs == mLastPrefs) {
+        // Nothing changed; avoid touching the processing chain.
+        return NS_OK;
+      }
+      if (MOZ_LOG_TEST(GetMediaManagerLog(), LogLevel::Debug)) {
+        MonitorAutoLock lock(mMonitor);
+        if (mSources.IsEmpty()) {
+          LOG(("Audio device %d reallocated", mCapIndex));
+        } else {
+          LOG(("Audio device %d allocated shared", mCapIndex));
+        }
+      }
+      break;
+
+    default:
+      LOG(("Audio device %d %s in ignored state %d", mCapIndex,
+           (aHandle? aHandle->mOrigin.get() : ""), mState));
+      break;
+  }
+
+  if (sChannelsOpen > 0) {
+    int error;
+
+    error = mVoEProcessing->SetEcStatus(prefs.mAecOn, (webrtc::EcModes)prefs.mAec);
+    if (error) {
+      LOG(("%s Error setting Echo Status: %d ",__FUNCTION__, error));
+      // Overhead of capturing all the time is very low (<0.1% of an audio only call)
+      // NOTE(review): this metrics block only runs when SetEcStatus *failed*;
+      // verify it was not intended to run unconditionally when AEC is on.
+      if (prefs.mAecOn) {
+        error = mVoEProcessing->SetEcMetricsStatus(true);
+        if (error) {
+          LOG(("%s Error setting Echo Metrics: %d ",__FUNCTION__, error));
+        }
+      }
+    }
+    error = mVoEProcessing->SetAgcStatus(prefs.mAgcOn, (webrtc::AgcModes)prefs.mAgc);
+    if (error) {
+      LOG(("%s Error setting AGC Status: %d ",__FUNCTION__, error));
+    }
+    error = mVoEProcessing->SetNsStatus(prefs.mNoiseOn, (webrtc::NsModes)prefs.mNoise);
+    if (error) {
+      LOG(("%s Error setting NoiseSuppression Status: %d ",__FUNCTION__, error));
+    }
+  }
+
+  // With no processing enabled we can bypass webrtc.org entirely and feed
+  // the MSG directly at graph rate (see PassThrough()/NotifyInputData()).
+  mSkipProcessing = !(prefs.mAecOn || prefs.mAgcOn || prefs.mNoiseOn);
+  if (mSkipProcessing) {
+    mSampleFrequency = MediaEngine::USE_GRAPH_RATE;
+  }
+  SetLastPrefs(prefs);
+  return NS_OK;
+}
+
+// Records the prefs just applied and mirrors them into mSettings on the
+// main thread (mSettings backs the MediaTrackSettings exposed to content).
+void
+MediaEngineWebRTCMicrophoneSource::SetLastPrefs(
+    const MediaEnginePrefs& aPrefs)
+{
+  mLastPrefs = aPrefs;
+
+  // Keep |this| alive until the runnable has run.
+  RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
+
+  NS_DispatchToMainThread(media::NewRunnableFrom([that, aPrefs]() mutable {
+    that->mSettings.mEchoCancellation.Value() = aPrefs.mAecOn;
+    that->mSettings.mMozAutoGainControl.Value() = aPrefs.mAgcOn;
+    that->mSettings.mMozNoiseSuppression.Value() = aPrefs.mNoiseOn;
+    return NS_OK;
+  }));
+}
+
+
+// Releases one allocation handle; frees the VoE channel once the last
+// handle is gone (which may also deinit the whole engine).
+nsresult
+MediaEngineWebRTCMicrophoneSource::Deallocate(AllocationHandle* aHandle)
+{
+  AssertIsOnOwningThread();
+
+  Super::Deallocate(aHandle);
+
+  if (!mRegisteredHandles.Length()) {
+    // If empty, no callbacks to deliver data should be occuring
+    if (mState != kStopped && mState != kAllocated) {
+      return NS_ERROR_FAILURE;
+    }
+
+    FreeChannel();
+    LOG(("Audio device %d deallocated", mCapIndex));
+  } else {
+    LOG(("Audio device %d deallocated but still in use", mCapIndex));
+  }
+  return NS_OK;
+}
+
+// Adds an audio track on aStream and starts capture/processing. The first
+// caller transitions kAllocated -> kStarted and wires up VoE send/receive;
+// subsequent callers just attach their stream to the running capture.
+nsresult
+MediaEngineWebRTCMicrophoneSource::Start(SourceMediaStream *aStream,
+                                         TrackID aID,
+                                         const PrincipalHandle& aPrincipalHandle)
+{
+  AssertIsOnOwningThread();
+  if (sChannelsOpen == 0 || !aStream) {
+    return NS_ERROR_FAILURE;
+  }
+
+  {
+    MonitorAutoLock lock(mMonitor);
+    // mSources and mPrincipalHandles are kept as parallel arrays.
+    mSources.AppendElement(aStream);
+    mPrincipalHandles.AppendElement(aPrincipalHandle);
+    MOZ_ASSERT(mSources.Length() == mPrincipalHandles.Length());
+  }
+
+  AudioSegment* segment = new AudioSegment();
+  if (mSampleFrequency == MediaEngine::USE_GRAPH_RATE) {
+    mSampleFrequency = aStream->GraphRate();
+  }
+  aStream->AddAudioTrack(aID, mSampleFrequency, 0, segment, SourceMediaStream::ADDTRACK_QUEUED);
+
+  // XXX Make this based on the pref.
+  aStream->RegisterForAudioMixing();
+  LOG(("Start audio for stream %p", aStream));
+
+  if (!mListener) {
+    // Recreate the listener if a previous Stop()/Shutdown() dropped it.
+    mListener = new mozilla::WebRTCAudioDataListener(this);
+  }
+  if (mState == kStarted) {
+    MOZ_ASSERT(aID == mTrackID);
+    // Make sure we're associated with this stream
+    mAudioInput->StartRecording(aStream, mListener);
+    return NS_OK;
+  }
+  mState = kStarted;
+  mTrackID = aID;
+
+  // Make sure logger starts before capture
+  AsyncLatencyLogger::Get(true);
+
+  // Register output observer
+  // XXX
+  MOZ_ASSERT(gFarendObserver);
+  gFarendObserver->Clear();
+
+  if (mVoEBase->StartReceive(mChannel)) {
+    return NS_ERROR_FAILURE;
+  }
+
+  // Must be *before* StartSend() so it will notice we selected external input (full_duplex)
+  mAudioInput->StartRecording(aStream, mListener);
+
+  if (mVoEBase->StartSend(mChannel)) {
+    return NS_ERROR_FAILURE;
+  }
+
+  // Attach external media processor, so this::Process will be called.
+  mVoERender->RegisterExternalMediaProcessing(mChannel, webrtc::kRecordingPerChannel, *this);
+
+  return NS_OK;
+}
+
+// Detaches aSource from this microphone. Only when the last stream detaches
+// does capture actually stop (state -> kStopped, VoE send/receive torn down).
+nsresult
+MediaEngineWebRTCMicrophoneSource::Stop(SourceMediaStream *aSource, TrackID aID)
+{
+  AssertIsOnOwningThread();
+  {
+    MonitorAutoLock lock(mMonitor);
+
+    size_t sourceIndex = mSources.IndexOf(aSource);
+    if (sourceIndex == mSources.NoIndex) {
+      // Already stopped - this is allowed
+      return NS_OK;
+    }
+    // Keep the parallel arrays in sync.
+    mSources.RemoveElementAt(sourceIndex);
+    mPrincipalHandles.RemoveElementAt(sourceIndex);
+    MOZ_ASSERT(mSources.Length() == mPrincipalHandles.Length());
+
+    aSource->EndTrack(aID);
+
+    if (!mSources.IsEmpty()) {
+      // Other streams still consume this mic; just detach this one.
+      mAudioInput->StopRecording(aSource);
+      return NS_OK;
+    }
+    if (mState != kStarted) {
+      return NS_ERROR_FAILURE;
+    }
+    if (!mVoEBase) {
+      return NS_ERROR_FAILURE;
+    }
+
+    mState = kStopped;
+  }
+  if (mListener) {
+    // breaks a cycle, since the WebRTCAudioDataListener has a RefPtr to us
+    mListener->Shutdown();
+    mListener = nullptr;
+  }
+
+  mAudioInput->StopRecording(aSource);
+
+  mVoERender->DeRegisterExternalMediaProcessing(mChannel, webrtc::kRecordingPerChannel);
+
+  if (mVoEBase->StopSend(mChannel)) {
+    return NS_ERROR_FAILURE;
+  }
+  if (mVoEBase->StopReceive(mChannel)) {
+    return NS_ERROR_FAILURE;
+  }
+  return NS_OK;
+}
+
+// Intentionally a no-op: this source pushes data into the graph from the
+// capture callbacks instead of being pulled.
+// NOTE(review): "%ld" with an int64_t argument assumes 64-bit long;
+// PRId64 would be portable to 32-bit targets -- confirm if that matters.
+void
+MediaEngineWebRTCMicrophoneSource::NotifyPull(MediaStreamGraph *aGraph,
+                                              SourceMediaStream *aSource,
+                                              TrackID aID,
+                                              StreamTime aDesiredTime,
+                                              const PrincipalHandle& aPrincipalHandle)
+{
+  // Ignore - we push audio data
+  LOG_FRAMES(("NotifyPull, desired = %ld", (int64_t) aDesiredTime));
+}
+
+// Speaker-data callback from the graph; intentionally empty here (far-end
+// data reaches the AEC via AudioOutputObserver::MixerCallback instead).
+void
+MediaEngineWebRTCMicrophoneSource::NotifyOutputData(MediaStreamGraph* aGraph,
+                                                    AudioDataValue* aBuffer,
+                                                    size_t aFrames,
+                                                    TrackRate aRate,
+                                                    uint32_t aChannels)
+{
+}
+
+// Slices incoming mic data into 10ms packets (aRate/100 frames) and feeds
+// them to VoE for AEC/AGC/NS; processed audio comes back via Process().
+void
+MediaEngineWebRTCMicrophoneSource::PacketizeAndProcess(MediaStreamGraph* aGraph,
+                                                       const AudioDataValue* aBuffer,
+                                                       size_t aFrames,
+                                                       TrackRate aRate,
+                                                       uint32_t aChannels)
+{
+  // This will call Process() with data coming out of the AEC/NS/AGC/etc chain
+  if (!mPacketizer ||
+      mPacketizer->PacketSize() != aRate/100u ||
+      mPacketizer->Channels() != aChannels) {
+    // It's ok to drop the audio still in the packetizer here.
+    mPacketizer =
+      new AudioPacketizer<AudioDataValue, int16_t>(aRate/100, aChannels);
+  }
+
+  mPacketizer->Input(aBuffer, static_cast<uint32_t>(aFrames));
+
+  while (mPacketizer->PacketsAvailable()) {
+    uint32_t samplesPerPacket = mPacketizer->PacketSize() *
+                                mPacketizer->Channels();
+    // mInputBuffer is reused across packets to avoid reallocating.
+    if (mInputBuffer.Length() < samplesPerPacket) {
+      mInputBuffer.SetLength(samplesPerPacket);
+    }
+    int16_t* packet = mInputBuffer.Elements();
+    mPacketizer->Output(packet);
+
+    mVoERender->ExternalRecordingInsertData(packet, samplesPerPacket, aRate, 0);
+  }
+}
+
+// Copies aFrames of captured audio into every attached SourceMediaStream.
+// Each stream gets its own SharedBuffer so it can own the data independently.
+template<typename T>
+void
+MediaEngineWebRTCMicrophoneSource::InsertInGraph(const T* aBuffer,
+                                                 size_t aFrames,
+                                                 uint32_t aChannels)
+{
+  if (mState != kStarted) {
+    return;
+  }
+
+  size_t len = mSources.Length();
+  for (size_t i = 0; i < len; i++) {
+    if (!mSources[i]) {
+      continue;
+    }
+    RefPtr<SharedBuffer> buffer =
+      SharedBuffer::Create(aFrames * aChannels * sizeof(T));
+    PodCopy(static_cast<T*>(buffer->Data()),
+            aBuffer, aFrames * aChannels);
+
+    TimeStamp insertTime;
+    // Make sure we include the stream and the track.
+    // The 0:1 is a flag to note when we've done the final insert for a given input block.
+    LogTime(AsyncLatencyLogger::AudioTrackInsertion,
+            LATENCY_STREAM_ID(mSources[i].get(), mTrackID),
+            (i+1 < len) ? 0 : 1, insertTime);
+
+    nsAutoPtr<AudioSegment> segment(new AudioSegment());
+    AutoTArray<const T*, 1> channels;
+    // XXX Bug 971528 - Support stereo capture in gUM
+    MOZ_ASSERT(aChannels == 1,
+               "GraphDriver only supports us stereo audio for now");
+    channels.AppendElement(static_cast<T*>(buffer->Data()));
+    segment->AppendFrames(buffer.forget(), channels, aFrames,
+                          mPrincipalHandles[i]);
+    segment->GetStartTime(insertTime);
+
+    mSources[i]->AppendToTrack(mTrackID, segment);
+  }
+}
+
+// Called back on GraphDriver thread!
+// Note this can be called back after ::Shutdown()
+void
+MediaEngineWebRTCMicrophoneSource::NotifyInputData(MediaStreamGraph* aGraph,
+                                                   const AudioDataValue* aBuffer,
+                                                   size_t aFrames,
+                                                   TrackRate aRate,
+                                                   uint32_t aChannels)
+{
+  // If some processing is necessary, packetize and insert in the WebRTC.org
+  // code. Otherwise, directly insert the mic data in the MSG, bypassing all processing.
+  if (PassThrough()) {
+    InsertInGraph<AudioDataValue>(aBuffer, aFrames, aChannels);
+  } else {
+    PacketizeAndProcess(aGraph, aBuffer, aFrames, aRate, aChannels);
+  }
+}
+
+// Toggles the given VoE processing stage (Agc/Ec/Ns) off and back on when it
+// is currently enabled, forcing it to re-initialize after a device change.
+// Expects a local `bool enabled` and `mVoEProcessing` in scope, and returns
+// from the *enclosing* function on any VoE error.
+#define ResetProcessingIfNeeded(_processing) \
+do { \
+  webrtc::_processing##Modes mode; \
+  int rv = mVoEProcessing->Get##_processing##Status(enabled, mode); \
+  if (rv) { \
+    NS_WARNING("Could not get the status of the " \
+               #_processing " on device change."); \
+    return; \
+  } \
+ \
+  if (enabled) { \
+    rv = mVoEProcessing->Set##_processing##Status(!enabled); \
+    if (rv) { \
+      NS_WARNING("Could not reset the status of the " \
+                 #_processing " on device change."); \
+      return; \
+    } \
+ \
+    rv = mVoEProcessing->Set##_processing##Status(enabled); \
+    if (rv) { \
+      NS_WARNING("Could not reset the status of the " \
+                 #_processing " on device change."); \
+      return; \
+    } \
+  } \
+} while(0)
+
+// Re-initializes the enabled processing stages after the input device
+// changed; `enabled` is consumed by the ResetProcessingIfNeeded macro.
+void
+MediaEngineWebRTCMicrophoneSource::DeviceChanged() {
+  // Reset some processing
+  bool enabled;
+  ResetProcessingIfNeeded(Agc);
+  ResetProcessingIfNeeded(Ec);
+  ResetProcessingIfNeeded(Ns);
+}
+
+// Lazily acquires the VoE sub-interfaces shared by all mic sources.
+// Returns false (after cleanup) if any interface is unavailable.
+// NOTE(review): the GetInterface(mVoiceEngine) result for mVoEBase and the
+// Init() return code are not checked before use -- confirm failure handling.
+bool
+MediaEngineWebRTCMicrophoneSource::InitEngine()
+{
+  MOZ_ASSERT(!mVoEBase);
+  mVoEBase = webrtc::VoEBase::GetInterface(mVoiceEngine);
+
+  mVoEBase->Init();
+
+  mVoERender = webrtc::VoEExternalMedia::GetInterface(mVoiceEngine);
+  if (mVoERender) {
+    mVoENetwork = webrtc::VoENetwork::GetInterface(mVoiceEngine);
+    if (mVoENetwork) {
+      mVoEProcessing = webrtc::VoEAudioProcessing::GetInterface(mVoiceEngine);
+      if (mVoEProcessing) {
+        mNullTransport = new NullTransport();
+        return true;
+      }
+    }
+  }
+  // Partial acquisition: release whatever we got.
+  DeInitEngine();
+  return false;
+}
+
+// This shuts down the engine when no channel is open
+void
+MediaEngineWebRTCMicrophoneSource::DeInitEngine()
+{
+  if (mVoEBase) {
+    mVoEBase->Terminate();
+    delete mNullTransport;
+    mNullTransport = nullptr;
+
+    // Releasing the shared class-static interfaces (ScopedCustomReleasePtr).
+    mVoEProcessing = nullptr;
+    mVoENetwork = nullptr;
+    mVoERender = nullptr;
+    mVoEBase = nullptr;
+  }
+}
+
+// This shuts down the engine when no channel is open.
+// mState records if a channel is allocated (slightly redundantly to mChannel)
+void
+MediaEngineWebRTCMicrophoneSource::FreeChannel()
+{
+  if (mState != kReleased) {
+    if (mChannel != -1) {
+      MOZ_ASSERT(mVoENetwork && mVoEBase);
+      if (mVoENetwork) {
+        mVoENetwork->DeRegisterExternalTransport(mChannel);
+      }
+      if (mVoEBase) {
+        mVoEBase->DeleteChannel(mChannel);
+      }
+      mChannel = -1;
+    }
+    mState = kReleased;
+
+    MOZ_ASSERT(sChannelsOpen > 0);
+    // Last channel gone: tear the whole engine down.
+    if (--sChannelsOpen == 0) {
+      DeInitEngine();
+    }
+  }
+}
+
+// Creates a VoE channel for this mic, verifies the device is available, and
+// configures the "codec" (raw L16 PCM). On success: mState = kAllocated and
+// sChannelsOpen is bumped. Note: webrtc.org calls return 0 on success, so
+// the `!call(...)` tests below mean "call succeeded".
+bool
+MediaEngineWebRTCMicrophoneSource::AllocChannel()
+{
+  MOZ_ASSERT(mVoEBase);
+
+  mChannel = mVoEBase->CreateChannel();
+  if (mChannel >= 0) {
+    if (!mVoENetwork->RegisterExternalTransport(mChannel, *mNullTransport)) {
+      mSampleFrequency = MediaEngine::DEFAULT_SAMPLE_RATE;
+      LOG(("%s: sampling rate %u", __FUNCTION__, mSampleFrequency));
+
+      // Check for availability.
+      if (!mAudioInput->SetRecordingDevice(mCapIndex)) {
+#ifndef MOZ_B2G
+        // Because of the permission mechanism of B2G, we need to skip the status
+        // check here.
+        bool avail = false;
+        mAudioInput->GetRecordingDeviceStatus(avail);
+        if (!avail) {
+          if (sChannelsOpen == 0) {
+            DeInitEngine();
+          }
+          return false;
+        }
+#endif // MOZ_B2G
+
+        // Set "codec" to PCM, 32kHz on 1 channel
+        ScopedCustomReleasePtr<webrtc::VoECodec> ptrVoECodec(webrtc::VoECodec::GetInterface(mVoiceEngine));
+        if (ptrVoECodec) {
+          webrtc::CodecInst codec;
+          strcpy(codec.plname, ENCODING);
+          codec.channels = CHANNELS;
+          MOZ_ASSERT(mSampleFrequency == 16000 || mSampleFrequency == 32000);
+          codec.rate = SAMPLE_RATE(mSampleFrequency);
+          codec.plfreq = mSampleFrequency;
+          codec.pacsize = SAMPLE_LENGTH(mSampleFrequency);
+          codec.pltype = 0; // Default payload type
+
+          if (!ptrVoECodec->SetSendCodec(mChannel, codec)) {
+            mState = kAllocated;
+            sChannelsOpen++;
+            return true;
+          }
+        }
+      }
+    }
+  }
+  // Fallthrough failure path.
+  // NOTE(review): reached with mChannel == -1 when CreateChannel() failed,
+  // so DeleteChannel(-1) is called here -- confirm VoE tolerates that.
+  mVoEBase->DeleteChannel(mChannel);
+  mChannel = -1;
+  if (sChannelsOpen == 0) {
+    DeInitEngine();
+  }
+  return false;
+}
+
+// Full teardown: stop every attached stream, deallocate every registered
+// handle (which frees the channel / engine), and drop the audio input.
+void
+MediaEngineWebRTCMicrophoneSource::Shutdown()
+{
+  Super::Shutdown();
+  if (mListener) {
+    // breaks a cycle, since the WebRTCAudioDataListener has a RefPtr to us
+    mListener->Shutdown();
+    // Don't release the webrtc.org pointers yet until the Listener is (async) shutdown
+    mListener = nullptr;
+  }
+
+  if (mState == kStarted) {
+    SourceMediaStream *source;
+    bool empty;
+
+    // Stop() removes one stream per call; loop until none remain.
+    while (1) {
+      {
+        MonitorAutoLock lock(mMonitor);
+        empty = mSources.IsEmpty();
+        if (empty) {
+          break;
+        }
+        source = mSources[0];
+      }
+      Stop(source, kAudioTrack); // XXX change to support multiple tracks
+    }
+    MOZ_ASSERT(mState == kStopped);
+  }
+
+  while (mRegisteredHandles.Length()) {
+    MOZ_ASSERT(mState == kAllocated || mState == kStopped);
+    // on last Deallocate(), FreeChannel()s and DeInit()s if all channels are released
+    Deallocate(mRegisteredHandles[0].get());
+  }
+  MOZ_ASSERT(mState == kReleased);
+
+  mAudioInput = nullptr;
+}
+
+typedef int16_t sample;
+
+// VoE external-media callback: receives 10ms of processed (AEC/AGC/NS) mic
+// audio. First feeds buffered far-end (speaker) chunks to the AEC, then
+// pushes the processed capture into the attached streams.
+void
+MediaEngineWebRTCMicrophoneSource::Process(int channel,
+                                           webrtc::ProcessingTypes type,
+                                           sample *audio10ms, int length,
+                                           int samplingFreq, bool isStereo)
+{
+  MOZ_ASSERT(!PassThrough(), "This should be bypassed when in PassThrough mode.");
+  // On initial capture, throw away all far-end data except the most recent sample
+  // since it's already irrelevant and we want to keep avoid confusing the AEC far-end
+  // input code with "old" audio.
+  if (!mStarted) {
+    mStarted  = true;
+    while (gFarendObserver->Size() > 1) {
+      free(gFarendObserver->Pop()); // only call if size() > 0
+    }
+  }
+
+  while (gFarendObserver->Size() > 0) {
+    FarEndAudioChunk *buffer = gFarendObserver->Pop(); // only call if size() > 0
+    if (buffer) {
+      // NOTE(review): this inner `length` shadows the function parameter of
+      // the same name; rename one of them if this code is ever revised.
+      int length = buffer->mSamples;
+      int res = mVoERender->ExternalPlayoutData(buffer->mData,
+                                                gFarendObserver->PlayoutFrequency(),
+                                                gFarendObserver->PlayoutChannels(),
+                                                mPlayoutDelay,
+                                                length);
+      free(buffer);
+      if (res == -1) {
+        return;
+      }
+    }
+  }
+
+  MonitorAutoLock lock(mMonitor);
+  if (mState != kStarted)
+    return;
+
+  MOZ_ASSERT(!isStereo);
+  InsertInGraph<int16_t>(audio10ms, length, 1);
+  return;
+}
+
+// The audio-capture pseudo-device has a fixed display name.
+void
+MediaEngineWebRTCAudioCaptureSource::GetName(nsAString &aName) const
+{
+  aName.AssignLiteral("AudioCapture");
+}
+
+// Generates a fresh random UUID per call (the pseudo-device has no stable
+// hardware identity); returns the empty string on generation failure.
+void
+MediaEngineWebRTCAudioCaptureSource::GetUUID(nsACString &aUUID) const
+{
+  nsID uuid;
+  char uuidBuffer[NSID_LENGTH];
+  nsCString asciiString;
+  ErrorResult rv;
+
+  rv = nsContentUtils::GenerateUUIDInPlace(uuid);
+  if (rv.Failed()) {
+    aUUID.AssignLiteral("");
+    return;
+  }
+
+
+  uuid.ToProvidedString(uuidBuffer);
+  asciiString.AssignASCII(uuidBuffer);
+
+  // Remove {} and the null terminator
+  aUUID.Assign(Substring(asciiString, 1, NSID_LENGTH - 3));
+}
+
+// Adds an (initially empty) audio track; actual audio is routed into the
+// stream by the graph's audio-capture machinery, not by this source.
+nsresult
+MediaEngineWebRTCAudioCaptureSource::Start(SourceMediaStream *aMediaStream,
+                                           TrackID aId,
+                                           const PrincipalHandle& aPrincipalHandle)
+{
+  AssertIsOnOwningThread();
+  aMediaStream->AddTrack(aId, 0, new AudioSegment());
+  return NS_OK;
+}
+
+// Ends every track on the stream and marks it finished.
+nsresult
+MediaEngineWebRTCAudioCaptureSource::Stop(SourceMediaStream *aMediaStream,
+                                          TrackID aId)
+{
+  AssertIsOnOwningThread();
+  aMediaStream->EndAllTrackAndFinish();
+  return NS_OK;
+}
+
+// No constraints apply to the audio-capture pseudo-device; always succeeds.
+nsresult
+MediaEngineWebRTCAudioCaptureSource::Restart(
+    AllocationHandle* aHandle,
+    const dom::MediaTrackConstraints& aConstraints,
+    const MediaEnginePrefs &aPrefs,
+    const nsString& aDeviceId,
+    const char** aOutBadConstraint)
+{
+  MOZ_ASSERT(!aHandle);
+  return NS_OK;
+}
+
+uint32_t
+MediaEngineWebRTCAudioCaptureSource::GetBestFitnessDistance(
+    const nsTArray<const NormalizedConstraintSet*>& aConstraintSets,
+    const nsString& aDeviceId) const
+{
+  // There is only one way of capturing audio for now, and it's always adequate.
+  return 0;
+}
+
+}
diff --git a/dom/media/webrtc/MediaTrackConstraints.cpp b/dom/media/webrtc/MediaTrackConstraints.cpp
new file mode 100644
index 000000000..6225b6d49
--- /dev/null
+++ b/dom/media/webrtc/MediaTrackConstraints.cpp
@@ -0,0 +1,469 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "MediaTrackConstraints.h"
+#include "mozilla/dom/MediaStreamTrackBinding.h"
+
+#include <limits>
+#include <algorithm>
+#include <iterator>
+
+namespace mozilla {
+
+// Copies ideal/min/max from a WebIDL ConstrainRange dictionary.
+// A passed `exact` collapses the range to a single point and takes
+// precedence over min/max.
+template<class ValueType>
+template<class ConstrainRange>
+void
+NormalizedConstraintSet::Range<ValueType>::SetFrom(const ConstrainRange& aOther)
+{
+  if (aOther.mIdeal.WasPassed()) {
+    mIdeal.emplace(aOther.mIdeal.Value());
+  }
+  if (aOther.mExact.WasPassed()) {
+    mMin = aOther.mExact.Value();
+    mMax = aOther.mExact.Value();
+  } else {
+    if (aOther.mMin.WasPassed()) {
+      mMin = aOther.mMin.Value();
+    }
+    if (aOther.mMax.WasPassed()) {
+      mMax = aOther.mMax.Value();
+    }
+  }
+}
+
+// The Range code works surprisingly well for bool, except when averaging ideals.
+// Returns false (merge conflict) when the required ranges don't intersect.
+template<>
+bool
+NormalizedConstraintSet::Range<bool>::Merge(const Range& aOther) {
+  if (!Intersects(aOther)) {
+    return false;
+  }
+  Intersect(aOther);
+
+  // To avoid "unsafe use of type 'bool'", we keep counter in mMergeDenominator
+  // packed as (sum-of-ideals << 16) | count; FinalizeMerge() averages them.
+  uint32_t counter = mMergeDenominator >> 16;
+  uint32_t denominator = mMergeDenominator & 0xffff;
+
+  if (aOther.mIdeal.isSome()) {
+    if (mIdeal.isNothing()) {
+      mIdeal.emplace(aOther.Get(false));
+      counter = aOther.Get(false);
+      denominator = 1;
+    } else {
+      if (!denominator) {
+        // First merge with an existing ideal: seed the running average.
+        counter = Get(false);
+        denominator = 1;
+      }
+      counter += aOther.Get(false);
+      denominator++;
+    }
+  }
+  mMergeDenominator = ((counter & 0xffff) << 16) + (denominator & 0xffff);
+  return true;
+}
+
+// Resolves the accumulated bool-ideal average: true iff the mean rounds
+// down to >= 1 (i.e. every merged ideal was true).
+template<>
+void
+NormalizedConstraintSet::Range<bool>::FinalizeMerge()
+{
+  if (mMergeDenominator) {
+    uint32_t counter = mMergeDenominator >> 16;
+    uint32_t denominator = mMergeDenominator & 0xffff;
+
+    *mIdeal = !!(counter / denominator);
+    mMergeDenominator = 0;
+  }
+}
+
+// Normalizes a WebIDL (long or ConstrainLongRange) member. A bare long is
+// treated as ideal in the first (non-advanced) set and exact in advanced sets.
+NormalizedConstraintSet::LongRange::LongRange(
+    LongPtrType aMemberPtr,
+    const char* aName,
+    const dom::OwningLongOrConstrainLongRange& aOther,
+    bool advanced,
+    nsTArray<MemberPtrType>* aList)
+: Range<int32_t>((MemberPtrType)aMemberPtr, aName,
+                 1 + INT32_MIN, INT32_MAX, // +1 avoids Windows compiler bug
+                 aList)
+{
+  if (aOther.IsLong()) {
+    if (advanced) {
+      mMin = mMax = aOther.GetAsLong();
+    } else {
+      mIdeal.emplace(aOther.GetAsLong());
+    }
+  } else {
+    SetFrom(aOther.GetAsConstrainLongRange());
+  }
+}
+
+// Normalizes a plain long long member; the value is always taken as ideal.
+NormalizedConstraintSet::LongLongRange::LongLongRange(
+    LongLongPtrType aMemberPtr,
+    const char* aName,
+    const long long& aOther,
+    nsTArray<MemberPtrType>* aList)
+: Range<int64_t>((MemberPtrType)aMemberPtr, aName,
+                 1 + INT64_MIN, INT64_MAX, // +1 avoids Windows compiler bug
+                 aList)
+{
+  mIdeal.emplace(aOther);
+}
+
+// Normalizes a WebIDL (double or ConstrainDoubleRange) member; bare doubles
+// are ideal in the first set, exact in advanced sets.
+NormalizedConstraintSet::DoubleRange::DoubleRange(
+    DoublePtrType aMemberPtr,
+    const char* aName,
+    const dom::OwningDoubleOrConstrainDoubleRange& aOther, bool advanced,
+    nsTArray<MemberPtrType>* aList)
+: Range<double>((MemberPtrType)aMemberPtr, aName,
+                -std::numeric_limits<double>::infinity(),
+                std::numeric_limits<double>::infinity(), aList)
+{
+  if (aOther.IsDouble()) {
+    if (advanced) {
+      mMin = mMax = aOther.GetAsDouble();
+    } else {
+      mIdeal.emplace(aOther.GetAsDouble());
+    }
+  } else {
+    SetFrom(aOther.GetAsConstrainDoubleRange());
+  }
+}
+
+// Normalizes a WebIDL (boolean or ConstrainBooleanParameters) member; bare
+// booleans are ideal in the first set, exact in advanced sets.
+NormalizedConstraintSet::BooleanRange::BooleanRange(
+    BooleanPtrType aMemberPtr,
+    const char* aName,
+    const dom::OwningBooleanOrConstrainBooleanParameters& aOther,
+    bool advanced,
+    nsTArray<MemberPtrType>* aList)
+: Range<bool>((MemberPtrType)aMemberPtr, aName, false, true, aList)
+{
+  if (aOther.IsBoolean()) {
+    if (advanced) {
+      mMin = mMax = aOther.GetAsBoolean();
+    } else {
+      mIdeal.emplace(aOther.GetAsBoolean());
+    }
+  } else {
+    const dom::ConstrainBooleanParameters& r = aOther.GetAsConstrainBooleanParameters();
+    if (r.mIdeal.WasPassed()) {
+      mIdeal.emplace(r.mIdeal.Value());
+    }
+    if (r.mExact.WasPassed()) {
+      // exact collapses the range to a single point.
+      mMin = r.mExact.Value();
+      mMax = r.mExact.Value();
+    }
+  }
+}
+
+// Normalizes a WebIDL (string, sequence<string> or
+// ConstrainDOMStringParameters) member; bare values go to the ideal set in
+// the first constraint set and to the exact set in advanced sets.
+NormalizedConstraintSet::StringRange::StringRange(
+    StringPtrType aMemberPtr,
+    const char* aName,
+    const dom::OwningStringOrStringSequenceOrConstrainDOMStringParameters& aOther,
+    bool advanced,
+    nsTArray<MemberPtrType>* aList)
+  : BaseRange((MemberPtrType)aMemberPtr, aName, aList)
+{
+  if (aOther.IsString()) {
+    if (advanced) {
+      mExact.insert(aOther.GetAsString());
+    } else {
+      mIdeal.insert(aOther.GetAsString());
+    }
+  } else if (aOther.IsStringSequence()) {
+    if (advanced) {
+      mExact.clear();
+      for (auto& str : aOther.GetAsStringSequence()) {
+        mExact.insert(str);
+      }
+    } else {
+      mIdeal.clear();
+      for (auto& str : aOther.GetAsStringSequence()) {
+        mIdeal.insert(str);
+      }
+    }
+  } else {
+    SetFrom(aOther.GetAsConstrainDOMStringParameters());
+  }
+}
+
+// Copies the ideal/exact string sets from a ConstrainDOMStringParameters
+// dictionary. Each passed member fully replaces the corresponding set.
+void
+NormalizedConstraintSet::StringRange::SetFrom(
+    const dom::ConstrainDOMStringParameters& aOther)
+{
+  if (aOther.mIdeal.WasPassed()) {
+    mIdeal.clear();
+    if (aOther.mIdeal.Value().IsString()) {
+      mIdeal.insert(aOther.mIdeal.Value().GetAsString());
+    } else {
+      for (auto& str : aOther.mIdeal.Value().GetAsStringSequence()) {
+        mIdeal.insert(str);
+      }
+    }
+  }
+  if (aOther.mExact.WasPassed()) {
+    mExact.clear();
+    if (aOther.mExact.Value().IsString()) {
+      mExact.insert(aOther.mExact.Value().GetAsString());
+    } else {
+      for (auto& str : aOther.mExact.Value().GetAsStringSequence()) {
+        // Fix: exact sequence values were previously inserted into mIdeal,
+        // silently dropping a required string-list constraint.
+        mExact.insert(str);
+      }
+    }
+  }
+}
+
+// Filters n down to the values permitted by the exact set; with no exact
+// constraint, everything is permitted and n is returned unchanged.
+auto
+NormalizedConstraintSet::StringRange::Clamp(const ValueType& n) const -> ValueType
+{
+  if (!mExact.size()) {
+    return n;
+  }
+  ValueType result;
+  for (auto& entry : n) {
+    if (mExact.find(entry) != mExact.end()) {
+      result.insert(entry);
+    }
+  }
+  return result;
+}
+
+// True when the two exact sets are compatible: either side unconstrained,
+// or the sets share at least one value.
+bool
+NormalizedConstraintSet::StringRange::Intersects(const StringRange& aOther) const
+{
+  if (!mExact.size() || !aOther.mExact.size()) {
+    return true;
+  }
+
+  ValueType intersection;
+  set_intersection(mExact.begin(), mExact.end(),
+                   aOther.mExact.begin(), aOther.mExact.end(),
+                   std::inserter(intersection, intersection.begin()));
+  return !!intersection.size();
+}
+
+// Narrows our exact set to the values also allowed by aOther. An empty
+// aOther.mExact means "unconstrained" and leaves us unchanged.
+void
+NormalizedConstraintSet::StringRange::Intersect(const StringRange& aOther)
+{
+  if (!aOther.mExact.size()) {
+    return;
+  }
+
+  ValueType intersection;
+  set_intersection(mExact.begin(), mExact.end(),
+                   aOther.mExact.begin(), aOther.mExact.end(),
+                   std::inserter(intersection, intersection.begin()));
+  mExact = intersection;
+}
+
+// Merge another StringRange into this one: fail (return false) if the exact
+// sets are incompatible, otherwise intersect exacts and union ideals.
+bool
+NormalizedConstraintSet::StringRange::Merge(const StringRange& aOther)
+{
+  if (!Intersects(aOther)) {
+    return false;
+  }
+  Intersect(aOther);
+
+  ValueType unioned;
+  set_union(mIdeal.begin(), mIdeal.end(),
+            aOther.mIdeal.begin(), aOther.mIdeal.end(),
+            std::inserter(unioned, unioned.begin()));
+  mIdeal = unioned;
+  return true;
+}
+
+// Normalize a WebIDL MediaTrackConstraints dictionary: the top-level set is
+// normalized as non-advanced, and each entry of the 'advanced' sequence is
+// normalized with advanced semantics (ideal-vs-exact treatment differs).
+NormalizedConstraints::NormalizedConstraints(
+    const dom::MediaTrackConstraints& aOther,
+    nsTArray<MemberPtrType>* aList)
+  : NormalizedConstraintSet(aOther, false, aList)
+  , mBadConstraint(nullptr)
+{
+  if (aOther.mAdvanced.WasPassed()) {
+    for (auto& entry : aOther.mAdvanced.Value()) {
+      mAdvanced.push_back(NormalizedConstraintSet(entry, true));
+    }
+  }
+}
+
+// Merge constructor. Create net constraints out of merging a set of others.
+// This is only used to resolve competing constraints from concurrent requests,
+// something the spec doesn't cover.
+
+// NOTE(review): assumes aOthers is non-empty — aOthers[0] is dereferenced
+// unchecked; confirm all callers guarantee at least one element.
+NormalizedConstraints::NormalizedConstraints(
+    const nsTArray<const NormalizedConstraints*>& aOthers)
+  : NormalizedConstraintSet(*aOthers[0])
+  , mBadConstraint(nullptr)
+{
+  for (auto& entry : aOthers[0]->mAdvanced) {
+    mAdvanced.push_back(entry);
+  }
+
+  // Create a list of member pointers.
+  nsTArray<MemberPtrType> list;
+  NormalizedConstraints dummy(dom::MediaTrackConstraints(), &list);
+
+  // Do intersection of all required constraints, and average of ideals,
+
+  for (uint32_t i = 1; i < aOthers.Length(); i++) {
+    auto& other = *aOthers[i];
+
+    for (auto& memberPtr : list) {
+      auto& member = this->*memberPtr;
+      auto& otherMember = other.*memberPtr;
+
+      if (!member.Merge(otherMember)) {
+        // Record the first irreconcilable constraint's name and bail out;
+        // callers check mBadConstraint to report OverconstrainedError.
+        mBadConstraint = member.mName;
+        return;
+      }
+    }
+
+    for (auto& entry : other.mAdvanced) {
+      mAdvanced.push_back(entry);
+    }
+  }
+  for (auto& memberPtr : list) {
+    (this->*memberPtr).FinalizeMerge();
+  }
+
+  // ...except for resolution and frame rate where we take the highest ideal.
+  // This is a bit of a hack based on the perception that people would be more
+  // surprised if they were to get lower resolution than they ideally requested.
+  //
+  // The spec gives browsers leeway here, saying they "SHOULD use the one with
+  // the smallest fitness distance", and also does not directly address the
+  // problem of competing constraints at all. There is no real web interop issue
+  // here since this is more about interop with other tabs on the same browser.
+  //
+  // We should revisit this logic once we support downscaling of resolutions and
+  // decimating of frame rates, per track.
+
+  for (auto& other : aOthers) {
+    mWidth.TakeHighestIdeal(other->mWidth);
+    mHeight.TakeHighestIdeal(other->mHeight);
+
+    // Consider implicit 30 fps default in comparison of competing constraints.
+    // Avoids 160x90x10 and 640x480 becoming 1024x768x10 (fitness distance flaw)
+    // This pretty much locks in 30 fps or higher, except for single-tab use.
+    auto frameRate = other->mFrameRate;
+    if (frameRate.mIdeal.isNothing()) {
+      frameRate.mIdeal.emplace(30);
+    }
+    mFrameRate.TakeHighestIdeal(frameRate);
+  }
+}
+
+// Fold advanced constraint sets into the required set, per spec: an advanced
+// set is applied only if it is compatible (cannot overconstrain). Resolution
+// and frame rate are accepted or rejected as a unit; the audio booleans are
+// applied independently.
+FlattenedConstraints::FlattenedConstraints(const NormalizedConstraints& aOther)
+: NormalizedConstraintSet(aOther)
+{
+  for (auto& set : aOther.mAdvanced) {
+    // Must only apply compatible i.e. inherently non-overconstraining sets
+    // This rule is pretty much why this code is centralized here.
+    if (mWidth.Intersects(set.mWidth) &&
+        mHeight.Intersects(set.mHeight) &&
+        mFrameRate.Intersects(set.mFrameRate)) {
+      mWidth.Intersect(set.mWidth);
+      mHeight.Intersect(set.mHeight);
+      mFrameRate.Intersect(set.mFrameRate);
+    }
+    if (mEchoCancellation.Intersects(set.mEchoCancellation)) {
+      mEchoCancellation.Intersect(set.mEchoCancellation);
+    }
+    if (mMozNoiseSuppression.Intersects(set.mMozNoiseSuppression)) {
+      mMozNoiseSuppression.Intersect(set.mMozNoiseSuppression);
+    }
+    if (mMozAutoGainControl.Intersects(set.mMozAutoGainControl)) {
+      mMozAutoGainControl.Intersect(set.mMozAutoGainControl);
+    }
+  }
+}
+
+// MediaEngine helper
+//
+// The full algorithm for all devices. Sources that don't list capabilities
+// need to fake it and hardcode some by populating mHardcodedCapabilities above.
+//
+// Fitness distance returned as integer math * 1000. Infinity = UINT32_MAX
+
+// First, all devices have a minimum distance based on their deviceId.
+// If you have no other constraints, use this one. Reused by all device types.
+
+// Baseline distance for any device: just the deviceId constraint's fitness.
+uint32_t
+MediaConstraintsHelper::GetMinimumFitnessDistance(
+    const NormalizedConstraintSet &aConstraints,
+    const nsString& aDeviceId)
+{
+  return FitnessDistance(aDeviceId, aConstraints.mDeviceId);
+}
+
+// Numeric fitness distance, scaled by 1000 (integer math). Out of [min,max]
+// is infinite (UINT32_MAX); matching the ideal (or no ideal) is 0; otherwise
+// |n - ideal| / max(|n|, |ideal|) * 1000. The max() denominator cannot be 0
+// here: n == 0 == ideal would have returned 0 in the equality check above.
+template<class ValueType, class NormalizedRange>
+/* static */ uint32_t
+MediaConstraintsHelper::FitnessDistance(ValueType aN,
+                                        const NormalizedRange& aRange)
+{
+  if (aRange.mMin > aN || aRange.mMax < aN) {
+    return UINT32_MAX;
+  }
+  if (aN == aRange.mIdeal.valueOr(aN)) {
+    return 0;
+  }
+  return uint32_t(ValueType((std::abs(aN - aRange.mIdeal.value()) * 1000) /
+                            std::max(std::abs(aN), std::abs(aRange.mIdeal.value()))));
+}
+
+// Fitness distance returned as integer math * 1000. Infinity = UINT32_MAX
+
+// String fitness distance: failing a non-empty 'exact' set is infinite;
+// missing a non-empty 'ideal' set costs 1000; otherwise a perfect 0.
+/* static */ uint32_t
+MediaConstraintsHelper::FitnessDistance(
+    nsString aN,
+    const NormalizedConstraintSet::StringRange& aParams)
+{
+  if (aParams.mExact.size() && aParams.mExact.find(aN) == aParams.mExact.end()) {
+    return UINT32_MAX;
+  }
+  if (aParams.mIdeal.size() && aParams.mIdeal.find(aN) == aParams.mIdeal.end()) {
+    return 1000;
+  }
+  return 0;
+}
+
+// Single-source overload: wrap the engine source in a throwaway refcounted
+// device adapter so the nsTArray<RefPtr<DeviceType>> overload can be reused.
+template<class MediaEngineSourceType>
+const char*
+MediaConstraintsHelper::FindBadConstraint(
+    const NormalizedConstraints& aConstraints,
+    const MediaEngineSourceType& aMediaEngineSource,
+    const nsString& aDeviceId)
+{
+  // Adapter: forwards GetBestFitnessDistance to the wrapped engine source.
+  class MockDevice
+  {
+  public:
+    NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MockDevice);
+
+    explicit MockDevice(const MediaEngineSourceType* aMediaEngineSource,
+                        const nsString& aDeviceId)
+    : mMediaEngineSource(aMediaEngineSource),
+      // The following dud code exists to avoid 'unused typedef' error on linux.
+      mDeviceId(MockDevice::HasThreadSafeRefCnt::value ? aDeviceId : nsString()) {}
+
+    uint32_t GetBestFitnessDistance(
+        const nsTArray<const NormalizedConstraintSet*>& aConstraintSets,
+        bool aIsChrome)
+    {
+      return mMediaEngineSource->GetBestFitnessDistance(aConstraintSets,
+                                                        mDeviceId);
+    }
+
+  private:
+    ~MockDevice() {}
+
+    const MediaEngineSourceType* mMediaEngineSource;
+    nsString mDeviceId;
+  };
+
+  Unused << typename MockDevice::HasThreadSafeRefCnt();
+
+  nsTArray<RefPtr<MockDevice>> devices;
+  devices.AppendElement(new MockDevice(&aMediaEngineSource, aDeviceId));
+  return FindBadConstraint(aConstraints, devices);
+}
+
+}
diff --git a/dom/media/webrtc/MediaTrackConstraints.h b/dom/media/webrtc/MediaTrackConstraints.h
new file mode 100644
index 000000000..842fea0d2
--- /dev/null
+++ b/dom/media/webrtc/MediaTrackConstraints.h
@@ -0,0 +1,449 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// This file should not be included by other headers (only by .cpp files), as
+// it defines non-inline template helper code.
+
+#ifndef MEDIATRACKCONSTRAINTS_H_
+#define MEDIATRACKCONSTRAINTS_H_
+
+#include "mozilla/Attributes.h"
+#include "mozilla/dom/MediaStreamTrackBinding.h"
+#include "mozilla/dom/MediaTrackConstraintSetBinding.h"
+#include "mozilla/dom/MediaTrackSupportedConstraintsBinding.h"
+
+#include <map>
+#include <set>
+#include <vector>
+
+namespace mozilla {
+
+// Map a WebIDL enum value to its ASCII name via the generated
+// EnumValuesStrings table (indexed by the enum's integer value).
+template<class EnumValuesStrings, class Enum>
+static const char* EnumToASCII(const EnumValuesStrings& aStrings, Enum aValue) {
+  return aStrings[uint32_t(aValue)].value;
+}
+
+// Reverse lookup: scan the generated strings table (terminated by a null
+// 'value') for aValue; return the matching enum or aDefaultValue if absent.
+template<class EnumValuesStrings, class Enum>
+static Enum StringToEnum(const EnumValuesStrings& aStrings,
+                         const nsAString& aValue, Enum aDefaultValue) {
+  for (size_t i = 0; aStrings[i].value; i++) {
+    if (aValue.EqualsASCII(aStrings[i].value)) {
+      return Enum(i);
+    }
+  }
+  return aDefaultValue;
+}
+
+// Helper classes for orthogonal constraints without interdependencies.
+// Instead of constraining values, constrain the constraints themselves.
+
+class NormalizedConstraintSet
+{
+protected:
+  // Common base for all per-constraint ranges. Construction optionally
+  // registers the member's pointer-to-member in aList, which lets the merge
+  // constructor iterate all constraint members generically.
+  class BaseRange
+  {
+  protected:
+    typedef BaseRange NormalizedConstraintSet::* MemberPtrType;
+
+    BaseRange(MemberPtrType aMemberPtr, const char* aName,
+              nsTArray<MemberPtrType>* aList) : mName(aName) {
+      if (aList) {
+        aList->AppendElement(aMemberPtr);
+      }
+    }
+    virtual ~BaseRange() {}
+  public:
+    virtual bool Merge(const BaseRange& aOther) = 0;
+    virtual void FinalizeMerge() = 0;
+
+    // Constraint name as exposed to content (e.g. "width"), used for
+    // OverconstrainedError reporting.
+    const char* mName;
+  };
+
+  typedef BaseRange NormalizedConstraintSet::* MemberPtrType;
+
+public:
+  // Numeric constraint: [mMin, mMax] with an optional ideal. Merging averages
+  // ideals across sets (accumulate in *mIdeal, divide in FinalizeMerge).
+  template<class ValueType>
+  class Range : public BaseRange
+  {
+  public:
+    ValueType mMin, mMax;
+    Maybe<ValueType> mIdeal;
+
+    Range(MemberPtrType aMemberPtr, const char* aName, ValueType aMin,
+          ValueType aMax, nsTArray<MemberPtrType>* aList)
+      : BaseRange(aMemberPtr, aName, aList)
+      , mMin(aMin), mMax(aMax), mMergeDenominator(0) {}
+    virtual ~Range() {};
+
+    template<class ConstrainRange>
+    void SetFrom(const ConstrainRange& aOther);
+    ValueType Clamp(ValueType n) const { return std::max(mMin, std::min(n, mMax)); }
+    ValueType Get(ValueType defaultValue) const {
+      return Clamp(mIdeal.valueOr(defaultValue));
+    }
+    bool Intersects(const Range& aOther) const {
+      return mMax >= aOther.mMin && mMin <= aOther.mMax;
+    }
+    void Intersect(const Range& aOther) {
+      MOZ_ASSERT(Intersects(aOther));
+      mMin = std::max(mMin, aOther.mMin);
+      mMax = std::min(mMax, aOther.mMax);
+    }
+    bool Merge(const Range& aOther) {
+      if (!Intersects(aOther)) {
+        return false;
+      }
+      Intersect(aOther);
+
+      if (aOther.mIdeal.isSome()) {
+        // Ideal values, as stored, may be outside their min max range, so use
+        // clamped values in averaging, to avoid extreme outliers skewing results.
+        if (mIdeal.isNothing()) {
+          mIdeal.emplace(aOther.Get(0));
+          mMergeDenominator = 1;
+        } else {
+          if (!mMergeDenominator) {
+            *mIdeal = Get(0);
+            mMergeDenominator = 1;
+          }
+          *mIdeal += aOther.Get(0);
+          mMergeDenominator++;
+        }
+      }
+      return true;
+    }
+    void FinalizeMerge() override
+    {
+      if (mMergeDenominator) {
+        *mIdeal /= mMergeDenominator;
+        mMergeDenominator = 0;
+      }
+    }
+    void TakeHighestIdeal(const Range& aOther) {
+      if (aOther.mIdeal.isSome()) {
+        if (mIdeal.isNothing()) {
+          mIdeal.emplace(aOther.Get(0));
+        } else {
+          *mIdeal = std::max(Get(0), aOther.Get(0));
+        }
+      }
+    }
+  private:
+    bool Merge(const BaseRange& aOther) override {
+      return Merge(static_cast<const Range&>(aOther));
+    }
+
+    // Count of ideals summed into *mIdeal so far; 0 when not mid-merge.
+    uint32_t mMergeDenominator;
+  };
+
+  struct LongRange : public Range<int32_t>
+  {
+    typedef LongRange NormalizedConstraintSet::* LongPtrType;
+
+    LongRange(LongPtrType aMemberPtr, const char* aName,
+              const dom::OwningLongOrConstrainLongRange& aOther, bool advanced,
+              nsTArray<MemberPtrType>* aList);
+  };
+
+  struct LongLongRange : public Range<int64_t>
+  {
+    typedef LongLongRange NormalizedConstraintSet::* LongLongPtrType;
+
+    LongLongRange(LongLongPtrType aMemberPtr, const char* aName,
+                  const long long& aOther,
+                  nsTArray<MemberPtrType>* aList);
+  };
+
+  struct DoubleRange : public Range<double>
+  {
+    typedef DoubleRange NormalizedConstraintSet::* DoublePtrType;
+
+    DoubleRange(DoublePtrType aMemberPtr,
+                const char* aName,
+                const dom::OwningDoubleOrConstrainDoubleRange& aOther,
+                bool advanced,
+                nsTArray<MemberPtrType>* aList);
+  };
+
+  struct BooleanRange : public Range<bool>
+  {
+    typedef BooleanRange NormalizedConstraintSet::* BooleanPtrType;
+
+    BooleanRange(BooleanPtrType aMemberPtr, const char* aName,
+                 const dom::OwningBooleanOrConstrainBooleanParameters& aOther,
+                 bool advanced,
+                 nsTArray<MemberPtrType>* aList);
+
+    BooleanRange(BooleanPtrType aMemberPtr, const char* aName, const bool& aOther,
+                 nsTArray<MemberPtrType>* aList)
+      : Range<bool>((MemberPtrType)aMemberPtr, aName, false, true, aList) {
+      mIdeal.emplace(aOther);
+    }
+  };
+
+  // String-set constraint: an empty mExact or mIdeal set means unconstrained.
+  struct StringRange : public BaseRange
+  {
+    typedef std::set<nsString> ValueType;
+    ValueType mExact, mIdeal;
+
+    typedef StringRange NormalizedConstraintSet::* StringPtrType;
+
+    StringRange(StringPtrType aMemberPtr, const char* aName,
+        const dom::OwningStringOrStringSequenceOrConstrainDOMStringParameters& aOther,
+        bool advanced,
+        nsTArray<MemberPtrType>* aList);
+
+    StringRange(StringPtrType aMemberPtr, const char* aName,
+                const nsString& aOther, nsTArray<MemberPtrType>* aList)
+      : BaseRange((MemberPtrType)aMemberPtr, aName, aList) {
+      mIdeal.insert(aOther);
+    }
+
+    ~StringRange() {}
+
+    void SetFrom(const dom::ConstrainDOMStringParameters& aOther);
+    ValueType Clamp(const ValueType& n) const;
+    ValueType Get(const ValueType& defaultValue) const {
+      return Clamp(mIdeal.size() ? mIdeal : defaultValue);
+    }
+    bool Intersects(const StringRange& aOther) const;
+    void Intersect(const StringRange& aOther);
+    bool Merge(const StringRange& aOther);
+    void FinalizeMerge() override {}
+  private:
+    bool Merge(const BaseRange& aOther) override {
+      return Merge(static_cast<const StringRange&>(aOther));
+    }
+  };
+
+  // All new constraints should be added here whether they use flattening or not
+  LongRange mWidth, mHeight;
+  DoubleRange mFrameRate;
+  StringRange mFacingMode;
+  StringRange mMediaSource;
+  LongLongRange mBrowserWindow;
+  BooleanRange mScrollWithPage;
+  StringRange mDeviceId;
+  LongRange mViewportOffsetX, mViewportOffsetY, mViewportWidth, mViewportHeight;
+  BooleanRange mEchoCancellation, mMozNoiseSuppression, mMozAutoGainControl;
+private:
+  typedef NormalizedConstraintSet T;
+public:
+  NormalizedConstraintSet(const dom::MediaTrackConstraintSet& aOther,
+                          bool advanced,
+                          nsTArray<MemberPtrType>* aList = nullptr)
+  : mWidth(&T::mWidth, "width", aOther.mWidth, advanced, aList)
+  , mHeight(&T::mHeight, "height", aOther.mHeight, advanced, aList)
+  , mFrameRate(&T::mFrameRate, "frameRate", aOther.mFrameRate, advanced, aList)
+  , mFacingMode(&T::mFacingMode, "facingMode", aOther.mFacingMode, advanced, aList)
+  , mMediaSource(&T::mMediaSource, "mediaSource", aOther.mMediaSource, aList)
+  , mBrowserWindow(&T::mBrowserWindow, "browserWindow",
+                   aOther.mBrowserWindow.WasPassed() ?
+                   aOther.mBrowserWindow.Value() : 0, aList)
+  , mScrollWithPage(&T::mScrollWithPage, "scrollWithPage",
+                    aOther.mScrollWithPage.WasPassed() ?
+                    aOther.mScrollWithPage.Value() : false, aList)
+  , mDeviceId(&T::mDeviceId, "deviceId", aOther.mDeviceId, advanced, aList)
+  , mViewportOffsetX(&T::mViewportOffsetX, "viewportOffsetX",
+                     aOther.mViewportOffsetX, advanced, aList)
+  , mViewportOffsetY(&T::mViewportOffsetY, "viewportOffsetY",
+                     aOther.mViewportOffsetY, advanced, aList)
+  , mViewportWidth(&T::mViewportWidth, "viewportWidth",
+                   aOther.mViewportWidth, advanced, aList)
+  , mViewportHeight(&T::mViewportHeight, "viewportHeight",
+                    aOther.mViewportHeight, advanced, aList)
+  , mEchoCancellation(&T::mEchoCancellation, "echoCancellation",
+                      aOther.mEchoCancellation, advanced, aList)
+  , mMozNoiseSuppression(&T::mMozNoiseSuppression, "mozNoiseSuppression",
+                         aOther.mMozNoiseSuppression,
+                         advanced, aList)
+  , mMozAutoGainControl(&T::mMozAutoGainControl, "mozAutoGainControl",
+                        aOther.mMozAutoGainControl, advanced, aList) {}
+};
+
+template<> bool NormalizedConstraintSet::Range<bool>::Merge(const Range& aOther);
+template<> void NormalizedConstraintSet::Range<bool>::FinalizeMerge();
+
+// Used instead of MediaTrackConstraints in lower-level code.
+struct NormalizedConstraints : public NormalizedConstraintSet
+{
+  explicit NormalizedConstraints(const dom::MediaTrackConstraints& aOther,
+                                 nsTArray<MemberPtrType>* aList = nullptr);
+
+  // Merge constructor
+  explicit NormalizedConstraints(
+      const nsTArray<const NormalizedConstraints*>& aOthers);
+
+  // Normalized copies of the dictionary's 'advanced' constraint sets.
+  std::vector<NormalizedConstraintSet> mAdvanced;
+  // Set by the merge constructor to the name of the first constraint that
+  // failed to merge across requests; nullptr when the merge succeeded.
+  const char* mBadConstraint;
+};
+
+// Flattened version is used in low-level code with orthogonal constraints only.
+struct FlattenedConstraints : public NormalizedConstraintSet
+{
+  explicit FlattenedConstraints(const NormalizedConstraints& aOther);
+
+  // Convenience: normalize then flatten a raw WebIDL dictionary in one step.
+  explicit FlattenedConstraints(const dom::MediaTrackConstraints& aOther)
+  : FlattenedConstraints(NormalizedConstraints(aOther)) {}
+};
+
+// A helper class for MediaEngines
+
+class MediaConstraintsHelper
+{
+protected:
+  template<class ValueType, class NormalizedRange>
+  static uint32_t FitnessDistance(ValueType aN, const NormalizedRange& aRange);
+  static uint32_t FitnessDistance(nsString aN,
+      const NormalizedConstraintSet::StringRange& aConstraint);
+
+  static uint32_t
+  GetMinimumFitnessDistance(const NormalizedConstraintSet &aConstraints,
+                            const nsString& aDeviceId);
+
+  // True if at least one device in aDevices can satisfy aConstraints
+  // (i.e. reports a finite fitness distance for it).
+  template<class DeviceType>
+  static bool
+  SomeSettingsFit(const NormalizedConstraints &aConstraints,
+                  nsTArray<RefPtr<DeviceType>>& aDevices)
+  {
+    nsTArray<const NormalizedConstraintSet*> sets;
+    sets.AppendElement(&aConstraints);
+
+    MOZ_ASSERT(aDevices.Length());
+    for (auto& device : aDevices) {
+      if (device->GetBestFitnessDistance(sets, false) != UINT32_MAX) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+public:
+  // Apply constrains to a supplied list of devices (removes items from the list)
+  // Returns nullptr on success, "" or a constraint name when all devices were
+  // removed (the name of a constraint that no device satisfies, if one exists).
+  template<class DeviceType>
+  static const char*
+  SelectSettings(const NormalizedConstraints &aConstraints,
+                 nsTArray<RefPtr<DeviceType>>& aDevices,
+                 bool aIsChrome)
+  {
+    auto& c = aConstraints;
+
+    // First apply top-level constraints.
+
+    // Stack constraintSets that pass, starting with the required one, because the
+    // whole stack must be re-satisfied each time a capability-set is ruled out
+    // (this avoids storing state or pushing algorithm into the lower-level code).
+    nsTArray<RefPtr<DeviceType>> unsatisfactory;
+    nsTArray<const NormalizedConstraintSet*> aggregateConstraints;
+    aggregateConstraints.AppendElement(&c);
+
+    std::multimap<uint32_t, RefPtr<DeviceType>> ordered;
+
+    for (uint32_t i = 0; i < aDevices.Length();) {
+      uint32_t distance = aDevices[i]->GetBestFitnessDistance(aggregateConstraints,
+                                                              aIsChrome);
+      if (distance == UINT32_MAX) {
+        unsatisfactory.AppendElement(aDevices[i]);
+        aDevices.RemoveElementAt(i);
+      } else {
+        ordered.insert(std::pair<uint32_t, RefPtr<DeviceType>>(distance,
+                                                               aDevices[i]));
+        ++i;
+      }
+    }
+    if (!aDevices.Length()) {
+      return FindBadConstraint(c, unsatisfactory);
+    }
+
+    // Order devices by shortest distance
+    for (auto& ordinal : ordered) {
+      aDevices.RemoveElement(ordinal.second);
+      aDevices.AppendElement(ordinal.second);
+    }
+
+    // Then apply advanced constraints.
+
+    for (int i = 0; i < int(c.mAdvanced.size()); i++) {
+      aggregateConstraints.AppendElement(&c.mAdvanced[i]);
+      nsTArray<RefPtr<DeviceType>> rejects;
+      for (uint32_t j = 0; j < aDevices.Length();) {
+        if (aDevices[j]->GetBestFitnessDistance(aggregateConstraints,
+                                                aIsChrome) == UINT32_MAX) {
+          rejects.AppendElement(aDevices[j]);
+          aDevices.RemoveElementAt(j);
+        } else {
+          ++j;
+        }
+      }
+      if (!aDevices.Length()) {
+        // Advanced set eliminated everything: per spec it must be skipped,
+        // so restore the rejects and drop the set from the aggregate stack.
+        aDevices.AppendElements(Move(rejects));
+        aggregateConstraints.RemoveElementAt(aggregateConstraints.Length() - 1);
+      }
+    }
+    return nullptr;
+  }
+
+  template<class DeviceType>
+  static const char*
+  FindBadConstraint(const NormalizedConstraints& aConstraints,
+                    nsTArray<RefPtr<DeviceType>>& aDevices)
+  {
+    // The spec says to report a constraint that satisfies NONE
+    // of the sources. Unfortunately, this is a bit laborious to find out, and
+    // requires updating as new constraints are added!
+    auto& c = aConstraints;
+    dom::MediaTrackConstraints empty;
+
+    if (!aDevices.Length() ||
+        !SomeSettingsFit(NormalizedConstraints(empty), aDevices)) {
+      return "";
+    }
+    // Probe one constraint at a time against an otherwise-empty set; the
+    // first probe that no device satisfies is the culprit.
+    {
+      NormalizedConstraints fresh(empty);
+      fresh.mDeviceId = c.mDeviceId;
+      if (!SomeSettingsFit(fresh, aDevices)) {
+        return "deviceId";
+      }
+    }
+    {
+      NormalizedConstraints fresh(empty);
+      fresh.mWidth = c.mWidth;
+      if (!SomeSettingsFit(fresh, aDevices)) {
+        return "width";
+      }
+    }
+    {
+      NormalizedConstraints fresh(empty);
+      fresh.mHeight = c.mHeight;
+      if (!SomeSettingsFit(fresh, aDevices)) {
+        return "height";
+      }
+    }
+    {
+      NormalizedConstraints fresh(empty);
+      fresh.mFrameRate = c.mFrameRate;
+      if (!SomeSettingsFit(fresh, aDevices)) {
+        return "frameRate";
+      }
+    }
+    {
+      NormalizedConstraints fresh(empty);
+      fresh.mFacingMode = c.mFacingMode;
+      if (!SomeSettingsFit(fresh, aDevices)) {
+        return "facingMode";
+      }
+    }
+    return "";
+  }
+
+  template<class MediaEngineSourceType>
+  static const char*
+  FindBadConstraint(const NormalizedConstraints& aConstraints,
+                    const MediaEngineSourceType& aMediaEngineSource,
+                    const nsString& aDeviceId);
+};
+
+} // namespace mozilla
+
+#endif /* MEDIATRACKCONSTRAINTS_H_ */
diff --git a/dom/media/webrtc/PWebrtcGlobal.ipdl b/dom/media/webrtc/PWebrtcGlobal.ipdl
new file mode 100644
index 000000000..451634754
--- /dev/null
+++ b/dom/media/webrtc/PWebrtcGlobal.ipdl
@@ -0,0 +1,33 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+include protocol PContent;
+
+include "mozilla/media/webrtc/WebrtcGlobal.h";
+
+using struct mozilla::dom::RTCStatsReportInternal from "mozilla/dom/RTCStatsReportBinding.h";
+using WebrtcGlobalLog from "mozilla/media/webrtc/WebrtcGlobal.h";
+
+namespace mozilla {
+namespace dom {
+
+// Protocol managed by PContent: the parent requests WebRTC stats/log data
+// (and toggles AEC logging / debug level) in the child; the child replies
+// with the corresponding *Result messages, correlated by aRequestId.
+async protocol PWebrtcGlobal {
+  manager PContent;
+
+child: // parent -> child messages
+  async GetStatsRequest(int aRequestId, nsString aPcIdFilter);
+  async ClearStatsRequest();
+  async GetLogRequest(int aRequestId, nsCString aPattern);
+  async ClearLogRequest();
+  async SetAecLogging(bool aEnable);
+  async SetDebugMode(int aLevel);
+
+parent: // child -> parent messages
+  async GetStatsResult(int aRequestId, RTCStatsReportInternal[] aStats);
+  async GetLogResult(int aRequestId, WebrtcGlobalLog aLog);
+  async __delete__();
+};
+
+} // end namespace dom
+} // end namespace mozilla
diff --git a/dom/media/webrtc/PeerIdentity.cpp b/dom/media/webrtc/PeerIdentity.cpp
new file mode 100644
index 000000000..4b4abea3b
--- /dev/null
+++ b/dom/media/webrtc/PeerIdentity.cpp
@@ -0,0 +1,86 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: sw=2 ts=2 sts=2 expandtab
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "PeerIdentity.h"
+
+#include "mozilla/DebugOnly.h"
+#include "nsCOMPtr.h"
+#include "nsIIDNService.h"
+#include "nsNetCID.h"
+#include "nsServiceManagerUtils.h"
+
+namespace mozilla {
+
+// Compare against another PeerIdentity by delegating to the string overload.
+bool
+PeerIdentity::Equals(const PeerIdentity& aOther) const
+{
+  return Equals(aOther.mPeerIdentity);
+}
+
+// Compare [user@]host identities: users must match exactly; hosts are
+// compared after IDN (ACE/punycode) normalization. If the IDN service is
+// unavailable we fall back to a raw case-sensitive host comparison.
+bool
+PeerIdentity::Equals(const nsAString& aOtherString) const
+{
+  nsString user;
+  GetUser(mPeerIdentity, user);
+  nsString otherUser;
+  GetUser(aOtherString, otherUser);
+  if (user != otherUser) {
+    return false;
+  }
+
+  nsString host;
+  GetHost(mPeerIdentity, host);
+  nsString otherHost;
+  GetHost(aOtherString, otherHost);
+
+  nsresult rv;
+  nsCOMPtr<nsIIDNService> idnService
+    = do_GetService("@mozilla.org/network/idn-service;1", &rv);
+  if (NS_WARN_IF(NS_FAILED(rv))) {
+    return host == otherHost;
+  }
+
+  nsCString normHost;
+  GetNormalizedHost(idnService, host, normHost);
+  nsCString normOtherHost;
+  GetNormalizedHost(idnService, otherHost, normOtherHost);
+  return normHost == normOtherHost;
+}
+
+// Extract the optional user part (text before the first '@'); empty when the
+// identity has no '@'.
+/* static */ void
+PeerIdentity::GetUser(const nsAString& aPeerIdentity, nsAString& aUser)
+{
+  int32_t at = aPeerIdentity.FindChar('@');
+  if (at >= 0) {
+    aUser = Substring(aPeerIdentity, 0, at);
+  } else {
+    aUser.Truncate();
+  }
+}
+
+// Extract the host part (text after the first '@'); the whole identity when
+// there is no '@'.
+/* static */ void
+PeerIdentity::GetHost(const nsAString& aPeerIdentity, nsAString& aHost)
+{
+  int32_t at = aPeerIdentity.FindChar('@');
+  if (at >= 0) {
+    aHost = Substring(aPeerIdentity, at + 1);
+  } else {
+    aHost = aPeerIdentity;
+  }
+}
+
+// Convert a (possibly internationalized) host to its ASCII-compatible (ACE)
+// form for comparison. On failure only warns; aNormalizedHost then carries
+// whatever the IDN service left in it.
+/* static */ void
+PeerIdentity::GetNormalizedHost(const nsCOMPtr<nsIIDNService>& aIdnService,
+                                const nsAString& aHost,
+                                nsACString& aNormalizedHost)
+{
+  const nsCString chost = NS_ConvertUTF16toUTF8(aHost);
+  DebugOnly<nsresult> rv = aIdnService->ConvertUTF8toACE(chost, aNormalizedHost);
+  NS_WARNING_ASSERTION(NS_SUCCEEDED(rv),
+                       "Failed to convert UTF-8 host to ASCII");
+}
+
+} /* namespace mozilla */
diff --git a/dom/media/webrtc/PeerIdentity.h b/dom/media/webrtc/PeerIdentity.h
new file mode 100644
index 000000000..bdfa1d2b3
--- /dev/null
+++ b/dom/media/webrtc/PeerIdentity.h
@@ -0,0 +1,81 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: sw=2 ts=2 sts=2 expandtab
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef PeerIdentity_h
+#define PeerIdentity_h
+
+#ifdef MOZILLA_INTERNAL_API
+#include "nsString.h"
+#else
+#include "nsStringAPI.h"
+#endif
+
+template <class T> class nsCOMPtr;
+class nsIIDNService;
+
+namespace mozilla {
+
+/**
+ * This class implements the identifier used in WebRTC identity. Peers are
+ * identified using a string in the form [<user>@]<domain>, for instance,
+ * "user@example.com'. The (optional) user portion is a site-controlled string
+ * containing any character other than '@'. The domain portion is a valid IDN
+ * domain name and is compared accordingly.
+ *
+ * See: http://tools.ietf.org/html/draft-ietf-rtcweb-security-arch-09#section-5.6.5.3.3.1
+ */
+class PeerIdentity final : public RefCounted<PeerIdentity>
+{
+public:
+  MOZ_DECLARE_REFCOUNTED_TYPENAME(PeerIdentity)
+
+  explicit PeerIdentity(const nsAString& aPeerIdentity)
+    : mPeerIdentity(aPeerIdentity) {}
+  ~PeerIdentity() {}
+
+  bool Equals(const PeerIdentity& aOther) const;
+  bool Equals(const nsAString& aOtherString) const;
+  // Returns the identity in its original [user@]host form, unnormalized.
+  const nsString& ToString() const { return mPeerIdentity; }
+
+private:
+  // Split helpers: user is the part before the first '@' (may be empty),
+  // host is the remainder (or the whole string when no '@' is present).
+  static void GetUser(const nsAString& aPeerIdentity, nsAString& aUser);
+  static void GetHost(const nsAString& aPeerIdentity, nsAString& aHost);
+
+  static void GetNormalizedHost(const nsCOMPtr<nsIIDNService>& aIdnService,
+                                const nsAString& aHost,
+                                nsACString& aNormalizedHost);
+
+  // Stored verbatim as supplied; normalization happens only at compare time.
+  nsString mPeerIdentity;
+};
+
+// Convenience comparison operators, all defined in terms of Equals() so that
+// IDN host normalization applies uniformly.
+inline bool
+operator==(const PeerIdentity& aOne, const PeerIdentity& aTwo)
+{
+  return aOne.Equals(aTwo);
+}
+
+inline bool
+operator==(const PeerIdentity& aOne, const nsAString& aString)
+{
+  return aOne.Equals(aString);
+}
+
+inline bool
+operator!=(const PeerIdentity& aOne, const PeerIdentity& aTwo)
+{
+  return !aOne.Equals(aTwo);
+}
+
+inline bool
+operator!=(const PeerIdentity& aOne, const nsAString& aString)
+{
+  return !aOne.Equals(aString);
+}
+
+
+} /* namespace mozilla */
+
+#endif /* PeerIdentity_h */
diff --git a/dom/media/webrtc/RTCCertificate.cpp b/dom/media/webrtc/RTCCertificate.cpp
new file mode 100644
index 000000000..3f778bcbb
--- /dev/null
+++ b/dom/media/webrtc/RTCCertificate.cpp
@@ -0,0 +1,462 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/dom/RTCCertificate.h"
+
+#include <cmath>
+#include "cert.h"
+#include "jsapi.h"
+#include "mozilla/dom/CryptoKey.h"
+#include "mozilla/dom/RTCCertificateBinding.h"
+#include "mozilla/dom/WebCryptoCommon.h"
+#include "mozilla/dom/WebCryptoTask.h"
+#include "mozilla/Sprintf.h"
+
+#include <cstdio>
+
+namespace mozilla {
+namespace dom {
+
+#define RTCCERTIFICATE_SC_VERSION 0x00000001
+
+NS_IMPL_CYCLE_COLLECTION_WRAPPERCACHE(RTCCertificate, mGlobal)
+NS_IMPL_CYCLE_COLLECTING_ADDREF(RTCCertificate)
+NS_IMPL_CYCLE_COLLECTING_RELEASE(RTCCertificate)
+NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(RTCCertificate)
+ NS_WRAPPERCACHE_INTERFACE_MAP_ENTRY
+ NS_INTERFACE_MAP_ENTRY(nsISupports)
+NS_INTERFACE_MAP_END
+
+// Note: explicit casts necessary to avoid
+// warning C4307: '*' : integral constant overflow
+#define ONE_DAY PRTime(PR_USEC_PER_SEC) * PRTime(60) /*sec*/ \
+ * PRTime(60) /*min*/ * PRTime(24) /*hours*/
+#define EXPIRATION_DEFAULT ONE_DAY * PRTime(30)
+#define EXPIRATION_SLACK ONE_DAY
+#define EXPIRATION_MAX ONE_DAY * PRTime(365) /*year*/
+
+const size_t RTCCertificateCommonNameLength = 16;
+const size_t RTCCertificateMinRsaSize = 1024;
+
+// WebCrypto task that generates an asymmetric key pair and then wraps it in
+// a self-signed X.509 certificate; the result promise resolves with a new
+// RTCCertificate (see Resolve()).
+class GenerateRTCCertificateTask : public GenerateAsymmetricKeyTask
+{
+public:
+  // aExpires is a relative lifetime in PRTime (microseconds); it is turned
+  // into an absolute time in GenerateCertificate().
+  GenerateRTCCertificateTask(nsIGlobalObject* aGlobal, JSContext* aCx,
+                             const ObjectOrString& aAlgorithm,
+                             const Sequence<nsString>& aKeyUsages,
+                             PRTime aExpires)
+    : GenerateAsymmetricKeyTask(aGlobal, aCx, aAlgorithm, true, aKeyUsages),
+      mExpires(aExpires),
+      mAuthType(ssl_kea_null),
+      mCertificate(nullptr),
+      mSignatureAlg(SEC_OID_UNKNOWN)
+  {
+  }
+
+private:
+  PRTime mExpires;
+  SSLKEAType mAuthType;
+  UniqueCERTCertificate mCertificate;
+  SECOidTag mSignatureAlg;
+
+  // Build a random "CN=<hex>" subject name so certificates are not linkable
+  // across origins or sessions.
+  static CERTName* GenerateRandomName(PK11SlotInfo* aSlot)
+  {
+    uint8_t randomName[RTCCertificateCommonNameLength];
+    SECStatus rv = PK11_GenerateRandomOnSlot(aSlot, randomName,
+                                             sizeof(randomName));
+    if (rv != SECSuccess) {
+      return nullptr;
+    }
+
+    char buf[sizeof(randomName) * 2 + 4];
+    // PL_strncpy with length 3 copies "CN=" without a terminating NUL; the
+    // loop below is expected to fill in the rest of the buffer.
+    PL_strncpy(buf, "CN=", 3);
+    for (size_t i = 0; i < sizeof(randomName); ++i) {
+      // NOTE(review): snprintf with a size of 2 emits only ONE hex digit
+      // plus a NUL, so each byte is truncated and the string effectively
+      // ends after the first digit. A size of 3 appears intended — confirm
+      // against upstream before relying on the full-length name.
+      snprintf(&buf[i * 2 + 3], 2, "%.2x", randomName[i]);
+    }
+    buf[sizeof(buf) - 1] = '\0';
+
+    return CERT_AsciiToName(buf);
+  }
+
+  // Create the unsigned certificate skeleton: random subject, validity
+  // window [now - slack, now + mExpires], random serial number.
+  nsresult GenerateCertificate()
+  {
+    ScopedPK11SlotInfo slot(PK11_GetInternalSlot());
+    MOZ_ASSERT(slot.get());
+
+    ScopedCERTName subjectName(GenerateRandomName(slot.get()));
+    if (!subjectName) {
+      return NS_ERROR_DOM_UNKNOWN_ERR;
+    }
+
+    ScopedSECKEYPublicKey publicKey(mKeyPair->mPublicKey.get()->GetPublicKey());
+    ScopedCERTSubjectPublicKeyInfo spki(
+        SECKEY_CreateSubjectPublicKeyInfo(publicKey));
+    if (!spki) {
+      return NS_ERROR_DOM_UNKNOWN_ERR;
+    }
+
+    ScopedCERTCertificateRequest certreq(
+        CERT_CreateCertificateRequest(subjectName, spki, nullptr));
+    if (!certreq) {
+      return NS_ERROR_DOM_UNKNOWN_ERR;
+    }
+
+    // Backdate notBefore by EXPIRATION_SLACK to tolerate clock skew, and
+    // convert the relative mExpires into an absolute expiry time.
+    PRTime now = PR_Now();
+    PRTime notBefore = now - EXPIRATION_SLACK;
+    mExpires += now;
+
+    ScopedCERTValidity validity(CERT_CreateValidity(notBefore, mExpires));
+    if (!validity) {
+      return NS_ERROR_DOM_UNKNOWN_ERR;
+    }
+
+    unsigned long serial;
+    // Note: This serial in principle could collide, but it's unlikely, and we
+    // don't expect anyone to be validating certificates anyway.
+    SECStatus rv =
+      PK11_GenerateRandomOnSlot(slot,
+                                reinterpret_cast<unsigned char *>(&serial),
+                                sizeof(serial));
+    if (rv != SECSuccess) {
+      return NS_ERROR_DOM_UNKNOWN_ERR;
+    }
+
+    CERTCertificate* cert = CERT_CreateCertificate(serial, subjectName,
+                                                   validity, certreq);
+    if (!cert) {
+      return NS_ERROR_DOM_UNKNOWN_ERR;
+    }
+    mCertificate.reset(cert);
+    return NS_OK;
+  }
+
+  // Self-sign the certificate created by GenerateCertificate() with the
+  // freshly generated private key, using mSignatureAlg chosen in
+  // BeforeCrypto().
+  nsresult SignCertificate()
+  {
+    MOZ_ASSERT(mSignatureAlg != SEC_OID_UNKNOWN);
+    PLArenaPool *arena = mCertificate->arena;
+
+    SECStatus rv = SECOID_SetAlgorithmID(arena, &mCertificate->signature,
+                                         mSignatureAlg, nullptr);
+    if (rv != SECSuccess) {
+      return NS_ERROR_DOM_UNKNOWN_ERR;
+    }
+
+    // Set version to X509v3.
+    *(mCertificate->version.data) = SEC_CERTIFICATE_VERSION_3;
+    mCertificate->version.len = 1;
+
+    SECItem innerDER = { siBuffer, nullptr, 0 };
+    if (!SEC_ASN1EncodeItem(arena, &innerDER, mCertificate.get(),
+                            SEC_ASN1_GET(CERT_CertificateTemplate))) {
+      return NS_ERROR_DOM_UNKNOWN_ERR;
+    }
+
+    SECItem *signedCert = PORT_ArenaZNew(arena, SECItem);
+    if (!signedCert) {
+      return NS_ERROR_DOM_UNKNOWN_ERR;
+    }
+
+    ScopedSECKEYPrivateKey privateKey(mKeyPair->mPrivateKey.get()->GetPrivateKey());
+    rv = SEC_DerSignData(arena, signedCert, innerDER.data, innerDER.len,
+                         privateKey, mSignatureAlg);
+    if (rv != SECSuccess) {
+      return NS_ERROR_DOM_UNKNOWN_ERR;
+    }
+    mCertificate->derCert = *signedCert;
+    return NS_OK;
+  }
+
+  // Validate the requested algorithm and pick the matching NSS signature
+  // OID and TLS key-exchange type. Only RSASSA-PKCS1 (>= 1024 bits, SHA-256)
+  // and ECDSA are accepted.
+  nsresult BeforeCrypto() override
+  {
+    if (mAlgName.EqualsLiteral(WEBCRYPTO_ALG_RSASSA_PKCS1)) {
+      // Double check that size is OK.
+      auto sz = static_cast<size_t>(mRsaParams.keySizeInBits);
+      if (sz < RTCCertificateMinRsaSize) {
+        return NS_ERROR_DOM_NOT_SUPPORTED_ERR;
+      }
+
+      KeyAlgorithmProxy& alg = mKeyPair->mPublicKey.get()->Algorithm();
+      if (alg.mType != KeyAlgorithmProxy::RSA ||
+          !alg.mRsa.mHash.mName.EqualsLiteral(WEBCRYPTO_ALG_SHA256)) {
+        return NS_ERROR_DOM_NOT_SUPPORTED_ERR;
+      }
+
+      mSignatureAlg = SEC_OID_PKCS1_SHA256_WITH_RSA_ENCRYPTION;
+      mAuthType = ssl_kea_rsa;
+
+    } else if (mAlgName.EqualsLiteral(WEBCRYPTO_ALG_ECDSA)) {
+      // We only support good curves in WebCrypto.
+      // If that ever changes, check that a good one was chosen.
+
+      mSignatureAlg = SEC_OID_ANSIX962_ECDSA_SHA256_SIGNATURE;
+      mAuthType = ssl_kea_ecdh;
+    } else {
+      return NS_ERROR_DOM_NOT_SUPPORTED_ERR;
+    }
+    return NS_OK;
+  }
+
+  // Pipeline: generate key pair (base class), then build and sign the cert.
+  nsresult DoCrypto() override
+  {
+    nsresult rv = GenerateAsymmetricKeyTask::DoCrypto();
+    NS_ENSURE_SUCCESS(rv, rv);
+
+    rv = GenerateCertificate();
+    NS_ENSURE_SUCCESS(rv, rv);
+
+    rv = SignCertificate();
+    NS_ENSURE_SUCCESS(rv, rv);
+
+    return NS_OK;
+  }
+
+  virtual void Resolve() override
+  {
+    // Make copies of the private key and certificate, otherwise, when this
+    // object is deleted, the structures they reference will be deleted too.
+    SECKEYPrivateKey* key = mKeyPair->mPrivateKey.get()->GetPrivateKey();
+    CERTCertificate* cert = CERT_DupCertificate(mCertificate.get());
+    RefPtr<RTCCertificate> result =
+        new RTCCertificate(mResultPromise->GetParentObject(),
+                           key, cert, mAuthType, mExpires);
+    mResultPromise->MaybeResolve(result);
+  }
+};
+
+// Extract the optional |expires| member (milliseconds) from the WebIDL
+// options object and convert it to a relative PRTime (microseconds).
+// Returns EXPIRATION_DEFAULT when absent, clamps to EXPIRATION_MAX, and
+// returns 0 with aRv set if the dictionary fails to convert.
+static PRTime
+ReadExpires(JSContext* aCx, const ObjectOrString& aOptions,
+            ErrorResult& aRv)
+{
+  // This conversion might fail, but we don't really care; use the default.
+  // If this isn't an object, or it doesn't coerce into the right type,
+  // then we won't get the |expires| value. Either will be caught later.
+  RTCCertificateExpiration expiration;
+  if (!aOptions.IsObject()) {
+    return EXPIRATION_DEFAULT;
+  }
+  JS::RootedValue value(aCx, JS::ObjectValue(*aOptions.GetAsObject()));
+  if (!expiration.Init(aCx, value)) {
+    aRv.NoteJSContextException(aCx);
+    return 0;
+  }
+
+  if (!expiration.mExpires.WasPassed()) {
+    return EXPIRATION_DEFAULT;
+  }
+  // Compare in milliseconds to avoid overflow when scaling to microseconds.
+  static const uint64_t max =
+      static_cast<uint64_t>(EXPIRATION_MAX / PR_USEC_PER_MSEC);
+  if (expiration.mExpires.Value() > max) {
+    return EXPIRATION_MAX;
+  }
+  return static_cast<PRTime>(expiration.mExpires.Value() * PR_USEC_PER_MSEC);
+}
+
+// Implements RTCPeerConnection.generateCertificate: kicks off an async
+// GenerateRTCCertificateTask and returns the promise it will resolve
+// with the new RTCCertificate.
+already_AddRefed<Promise>
+RTCCertificate::GenerateCertificate(
+    const GlobalObject& aGlobal, const ObjectOrString& aOptions,
+    ErrorResult& aRv, JSCompartment* aCompartment)
+{
+  nsIGlobalObject* global = xpc::NativeGlobal(aGlobal.Get());
+  RefPtr<Promise> p = Promise::Create(global, aRv);
+  if (aRv.Failed()) {
+    return nullptr;
+  }
+  // The generated key pair is restricted to the "sign" usage.
+  Sequence<nsString> usages;
+  if (!usages.AppendElement(NS_LITERAL_STRING("sign"), fallible)) {
+    aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
+    return nullptr;
+  }
+
+  PRTime expires = ReadExpires(aGlobal.Context(), aOptions, aRv);
+  if (aRv.Failed()) {
+    return nullptr;
+  }
+  RefPtr<WebCryptoTask> task =
+      new GenerateRTCCertificateTask(global, aGlobal.Context(),
+                                     aOptions, usages, expires);
+  task->DispatchWithPromise(p);
+  return p.forget();
+}
+
+// Empty-certificate constructor; presumably used as the target of
+// ReadStructuredClone, which fills in all fields — confirm against callers.
+RTCCertificate::RTCCertificate(nsIGlobalObject* aGlobal)
+  : mGlobal(aGlobal),
+    mPrivateKey(nullptr),
+    mCertificate(nullptr),
+    mAuthType(ssl_kea_null),
+    mExpires(0)
+{
+}
+
+// Takes ownership of aPrivateKey and aCertificate (stored in Unique*
+// members); aExpires is an absolute PRTime in microseconds.
+RTCCertificate::RTCCertificate(nsIGlobalObject* aGlobal,
+                               SECKEYPrivateKey* aPrivateKey,
+                               CERTCertificate* aCertificate,
+                               SSLKEAType aAuthType,
+                               PRTime aExpires)
+  : mGlobal(aGlobal),
+    mPrivateKey(aPrivateKey),
+    mCertificate(aCertificate),
+    mAuthType(aAuthType),
+    mExpires(aExpires)
+{
+}
+
+RTCCertificate::~RTCCertificate()
+{
+  // Release the NSS-backed key and certificate only if NSS is still alive;
+  // after shutdown those objects must not be touched.
+  nsNSSShutDownPreventionLock locker;
+  if (isAlreadyShutDown()) {
+    return;
+  }
+  destructorSafeDestroyNSSReference();
+  shutdown(ShutdownCalledFrom::Object);
+}
+
+// This creates some interesting lifecycle consequences: the DtlsIdentity
+// holds NSS objects, but does not implement nsNSSShutDownObject.
+//
+// Unfortunately, the code that uses DtlsIdentity cannot always hold an NSS
+// shutdown prevention lock due to external linkage requirements. Therefore,
+// the lock is held on this object instead. Consequently, the DtlsIdentity
+// that this method returns must have a lifetime that is strictly shorter
+// than the RTCCertificate.
+//
+// RTCPeerConnection provides this guarantee by holding a strong reference to
+// the RTCCertificate. It will cleanup any DtlsIdentity instances that it
+// creates before the RTCCertificate reference is released.
+RefPtr<DtlsIdentity>
+RTCCertificate::CreateDtlsIdentity() const
+{
+  nsNSSShutDownPreventionLock locker;
+  if (isAlreadyShutDown() || !mPrivateKey || !mCertificate) {
+    return nullptr;
+  }
+  // Hand the DtlsIdentity its own copies so its lifetime is independent of
+  // this object's NSS members (see lifecycle comment above this method).
+  SECKEYPrivateKey* key = SECKEY_CopyPrivateKey(mPrivateKey.get());
+  CERTCertificate* cert = CERT_DupCertificate(mCertificate.get());
+  RefPtr<DtlsIdentity> id = new DtlsIdentity(key, cert, mAuthType);
+  return id;
+}
+
+// Standard WebIDL wrapper hook for the RTCCertificate binding.
+JSObject*
+RTCCertificate::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto)
+{
+  return RTCCertificateBinding::Wrap(aCx, this, aGivenProto);
+}
+
+// nsNSSShutDownObject hook: called when NSS is shutting down while this
+// object is still alive.
+void
+RTCCertificate::virtualDestroyNSSReference()
+{
+  destructorSafeDestroyNSSReference();
+}
+
+// Drop the NSS-backed members; safe to call from both the destructor and
+// the NSS shutdown path.
+void
+RTCCertificate::destructorSafeDestroyNSSReference()
+{
+  mPrivateKey.reset();
+  mCertificate.reset();
+}
+
+// Serialize the private key into the structured clone stream as a JWK
+// JSON string. Returns false on any conversion failure.
+bool
+RTCCertificate::WritePrivateKey(JSStructuredCloneWriter* aWriter,
+                                const nsNSSShutDownPreventionLock& aLockProof) const
+{
+  JsonWebKey jwk;
+  nsresult rv = CryptoKey::PrivateKeyToJwk(mPrivateKey.get(), jwk, aLockProof);
+  if (NS_FAILED(rv)) {
+    return false;
+  }
+  nsString json;
+  if (!jwk.ToJSON(json)) {
+    return false;
+  }
+  return WriteString(aWriter, json);
+}
+
+// Serialize the certificate as a length-prefixed DER blob. Only the first
+// certificate in the chain is written.
+bool
+RTCCertificate::WriteCertificate(JSStructuredCloneWriter* aWriter,
+                                 const nsNSSShutDownPreventionLock& /*proof*/) const
+{
+  ScopedCERTCertificateList certs(CERT_CertListFromCert(mCertificate.get()));
+  if (!certs || certs->len <= 0) {
+    return false;
+  }
+  // Pair = (byte length, 0 padding word); payload follows as raw bytes.
+  if (!JS_WriteUint32Pair(aWriter, certs->certs[0].len, 0)) {
+    return false;
+  }
+  return JS_WriteBytes(aWriter, certs->certs[0].data, certs->certs[0].len);
+}
+
+// Structured-clone writer. Layout (must mirror ReadStructuredClone):
+// (version, authType) pair, 64-bit expiry as two 32-bit halves, JWK-encoded
+// private key, then DER certificate.
+bool
+RTCCertificate::WriteStructuredClone(JSStructuredCloneWriter* aWriter) const
+{
+  nsNSSShutDownPreventionLock locker;
+  if (isAlreadyShutDown() || !mPrivateKey || !mCertificate) {
+    return false;
+  }
+
+  return JS_WriteUint32Pair(aWriter, RTCCERTIFICATE_SC_VERSION, mAuthType) &&
+         JS_WriteUint32Pair(aWriter, (mExpires >> 32) & 0xffffffff,
+                            mExpires & 0xffffffff) &&
+         WritePrivateKey(aWriter, locker) &&
+         WriteCertificate(aWriter, locker);
+}
+
+// Inverse of WritePrivateKey: read the JWK JSON string and reconstruct the
+// NSS private key. Returns false on parse or import failure.
+bool
+RTCCertificate::ReadPrivateKey(JSStructuredCloneReader* aReader,
+                               const nsNSSShutDownPreventionLock& aLockProof)
+{
+  nsString json;
+  if (!ReadString(aReader, json)) {
+    return false;
+  }
+  JsonWebKey jwk;
+  if (!jwk.Init(json)) {
+    return false;
+  }
+  mPrivateKey.reset(CryptoKey::PrivateKeyFromJwk(jwk, aLockProof));
+  return !!mPrivateKey;
+}
+
+// Inverse of WriteCertificate: read the DER blob and import it as a
+// temporary certificate in the default cert DB.
+bool
+RTCCertificate::ReadCertificate(JSStructuredCloneReader* aReader,
+                                const nsNSSShutDownPreventionLock& /*proof*/)
+{
+  CryptoBuffer cert;
+  if (!ReadBuffer(aReader, cert) || cert.Length() == 0) {
+    return false;
+  }
+
+  SECItem der = { siBuffer, cert.Elements(),
+                  static_cast<unsigned int>(cert.Length()) };
+  mCertificate.reset(CERT_NewTempCertificate(CERT_GetDefaultCertDB(),
+                                             &der, nullptr, true, true));
+  return !!mCertificate;
+}
+
+// Structured-clone reader; must consume fields in exactly the order that
+// WriteStructuredClone produced them. Rejects any version other than
+// RTCCERTIFICATE_SC_VERSION.
+bool
+RTCCertificate::ReadStructuredClone(JSStructuredCloneReader* aReader)
+{
+  nsNSSShutDownPreventionLock locker;
+  if (isAlreadyShutDown()) {
+    return false;
+  }
+
+  uint32_t version, authType;
+  if (!JS_ReadUint32Pair(aReader, &version, &authType) ||
+      version != RTCCERTIFICATE_SC_VERSION) {
+    return false;
+  }
+  mAuthType = static_cast<SSLKEAType>(authType);
+
+  // Reassemble the 64-bit expiry from its two 32-bit halves.
+  uint32_t high, low;
+  if (!JS_ReadUint32Pair(aReader, &high, &low)) {
+    return false;
+  }
+  mExpires = static_cast<PRTime>(high) << 32 | low;
+
+  return ReadPrivateKey(aReader, locker) &&
+         ReadCertificate(aReader, locker);
+}
+
+} // namespace dom
+} // namespace mozilla
diff --git a/dom/media/webrtc/RTCCertificate.h b/dom/media/webrtc/RTCCertificate.h
new file mode 100644
index 000000000..63869849c
--- /dev/null
+++ b/dom/media/webrtc/RTCCertificate.h
@@ -0,0 +1,98 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_dom_RTCCertificate_h
+#define mozilla_dom_RTCCertificate_h
+
+#include "nsCycleCollectionParticipant.h"
+#include "nsWrapperCache.h"
+#include "nsIGlobalObject.h"
+#include "nsNSSShutDown.h"
+#include "prtime.h"
+#include "sslt.h"
+#include "ScopedNSSTypes.h"
+
+#include "mozilla/ErrorResult.h"
+#include "mozilla/UniquePtr.h"
+#include "mozilla/RefPtr.h"
+#include "mozilla/dom/CryptoKey.h"
+#include "mozilla/dom/RTCCertificateBinding.h"
+#include "mtransport/dtlsidentity.h"
+#include "js/StructuredClone.h"
+#include "js/TypeDecls.h"
+
+namespace mozilla {
+namespace dom {
+
+class ObjectOrString;
+
+// DOM object pairing an NSS private key with a self-signed certificate,
+// as produced by RTCPeerConnection.generateCertificate. Participates in
+// cycle collection and in NSS shutdown tracking.
+class RTCCertificate final
+    : public nsISupports,
+      public nsWrapperCache,
+      public nsNSSShutDownObject
+{
+public:
+  NS_DECL_CYCLE_COLLECTING_ISUPPORTS
+  NS_DECL_CYCLE_COLLECTION_SCRIPT_HOLDER_CLASS(RTCCertificate)
+
+  // WebIDL method that implements RTCPeerConnection.generateCertificate.
+  static already_AddRefed<Promise> GenerateCertificate(
+      const GlobalObject& aGlobal, const ObjectOrString& aOptions,
+      ErrorResult& aRv, JSCompartment* aCompartment = nullptr);
+
+  // Empty certificate (filled in via ReadStructuredClone).
+  explicit RTCCertificate(nsIGlobalObject* aGlobal);
+  // Takes ownership of aPrivateKey and aCertificate.
+  RTCCertificate(nsIGlobalObject* aGlobal, SECKEYPrivateKey* aPrivateKey,
+                 CERTCertificate* aCertificate, SSLKEAType aAuthType,
+                 PRTime aExpires);
+
+  nsIGlobalObject* GetParentObject() const { return mGlobal; }
+  virtual JSObject* WrapObject(JSContext* aCx,
+                               JS::Handle<JSObject*> aGivenProto) override;
+
+  // WebIDL expires attribute. Note: JS dates are milliseconds since epoch;
+  // NSPR PRTime is in microseconds since the same epoch.
+  uint64_t Expires() const
+  {
+    return mExpires / PR_USEC_PER_MSEC;
+  }
+
+  // Accessors for use by PeerConnectionImpl.
+  RefPtr<DtlsIdentity> CreateDtlsIdentity() const;
+  const UniqueCERTCertificate& Certificate() const { return mCertificate; }
+
+  // For nsNSSShutDownObject
+  virtual void virtualDestroyNSSReference() override;
+  void destructorSafeDestroyNSSReference();
+
+  // Structured clone methods
+  bool WriteStructuredClone(JSStructuredCloneWriter* aWriter) const;
+  bool ReadStructuredClone(JSStructuredCloneReader* aReader);
+
+private:
+  ~RTCCertificate();
+  // Non-copyable: the NSS members are uniquely owned.
+  void operator=(const RTCCertificate&) = delete;
+  RTCCertificate(const RTCCertificate&) = delete;
+
+  bool ReadCertificate(JSStructuredCloneReader* aReader,
+                       const nsNSSShutDownPreventionLock& /*lockproof*/);
+  bool ReadPrivateKey(JSStructuredCloneReader* aReader,
+                      const nsNSSShutDownPreventionLock& aLockProof);
+  bool WriteCertificate(JSStructuredCloneWriter* aWriter,
+                        const nsNSSShutDownPreventionLock& /*lockproof*/) const;
+  bool WritePrivateKey(JSStructuredCloneWriter* aWriter,
+                       const nsNSSShutDownPreventionLock& aLockProof) const;
+
+  RefPtr<nsIGlobalObject> mGlobal;
+  UniqueSECKEYPrivateKey mPrivateKey;
+  UniqueCERTCertificate mCertificate;
+  SSLKEAType mAuthType;
+  PRTime mExpires;   // absolute, microseconds since epoch
+};
+
+} // namespace dom
+} // namespace mozilla
+
+#endif // mozilla_dom_RTCCertificate_h
diff --git a/dom/media/webrtc/RTCIdentityProviderRegistrar.cpp b/dom/media/webrtc/RTCIdentityProviderRegistrar.cpp
new file mode 100644
index 000000000..1d33923d2
--- /dev/null
+++ b/dom/media/webrtc/RTCIdentityProviderRegistrar.cpp
@@ -0,0 +1,90 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "RTCIdentityProviderRegistrar.h"
+#include "mozilla/Attributes.h"
+#include "nsCycleCollectionParticipant.h"
+
+namespace mozilla {
+namespace dom {
+
+NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(RTCIdentityProviderRegistrar)
+ NS_WRAPPERCACHE_INTERFACE_MAP_ENTRY
+ NS_INTERFACE_MAP_ENTRY(nsISupports)
+NS_INTERFACE_MAP_END
+
+NS_IMPL_CYCLE_COLLECTING_ADDREF(RTCIdentityProviderRegistrar)
+NS_IMPL_CYCLE_COLLECTING_RELEASE(RTCIdentityProviderRegistrar)
+
+NS_IMPL_CYCLE_COLLECTION_WRAPPERCACHE(RTCIdentityProviderRegistrar,
+ mGlobal,
+ mGenerateAssertionCallback,
+ mValidateAssertionCallback)
+
+// Starts with no IdP registered; callbacks are installed via Register().
+RTCIdentityProviderRegistrar::RTCIdentityProviderRegistrar(
+    nsIGlobalObject* aGlobal)
+  : mGlobal(aGlobal)
+  , mGenerateAssertionCallback(nullptr)
+  , mValidateAssertionCallback(nullptr)
+{
+  MOZ_COUNT_CTOR(RTCIdentityProviderRegistrar);
+}
+
+RTCIdentityProviderRegistrar::~RTCIdentityProviderRegistrar()
+{
+  // Leak-checking bookkeeping only; members clean up via RefPtr/nsCOMPtr.
+  MOZ_COUNT_DTOR(RTCIdentityProviderRegistrar);
+}
+
+// Required by nsWrapperCache for binding parentage.
+nsIGlobalObject*
+RTCIdentityProviderRegistrar::GetParentObject() const
+{
+  return mGlobal;
+}
+
+// Standard WebIDL wrapper hook.
+JSObject*
+RTCIdentityProviderRegistrar::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto)
+{
+  return RTCIdentityProviderRegistrarBinding::Wrap(aCx, this, aGivenProto);
+}
+
+// Install (or replace) the IdP's generate/validate assertion callbacks.
+void
+RTCIdentityProviderRegistrar::Register(const RTCIdentityProvider& aIdp)
+{
+  mGenerateAssertionCallback = aIdp.mGenerateAssertion;
+  mValidateAssertionCallback = aIdp.mValidateAssertion;
+}
+
+// True only when BOTH callbacks have been registered.
+bool
+RTCIdentityProviderRegistrar::HasIdp() const
+{
+  return mGenerateAssertionCallback && mValidateAssertionCallback;
+}
+
+// Forward an assertion-generation request to the registered IdP callback.
+// Throws NS_ERROR_NOT_INITIALIZED if no callback has been registered yet.
+already_AddRefed<Promise>
+RTCIdentityProviderRegistrar::GenerateAssertion(
+    const nsAString& aContents, const nsAString& aOrigin,
+    const Optional<nsAString>& aUsernameHint, ErrorResult& aRv)
+{
+  if (!mGenerateAssertionCallback) {
+    aRv.Throw(NS_ERROR_NOT_INITIALIZED);
+    return nullptr;
+  }
+  return mGenerateAssertionCallback->Call(aContents, aOrigin, aUsernameHint, aRv);
+}
+// Forward an assertion-validation request to the registered IdP callback.
+// Throws NS_ERROR_NOT_INITIALIZED if no callback has been registered yet.
+already_AddRefed<Promise>
+RTCIdentityProviderRegistrar::ValidateAssertion(
+    const nsAString& aAssertion, const nsAString& aOrigin, ErrorResult& aRv)
+{
+  if (!mValidateAssertionCallback) {
+    aRv.Throw(NS_ERROR_NOT_INITIALIZED);
+    return nullptr;
+  }
+  return mValidateAssertionCallback->Call(aAssertion, aOrigin, aRv);
+}
+
+
+
+} // namespace dom
+} // namespace mozilla
diff --git a/dom/media/webrtc/RTCIdentityProviderRegistrar.h b/dom/media/webrtc/RTCIdentityProviderRegistrar.h
new file mode 100644
index 000000000..49537503b
--- /dev/null
+++ b/dom/media/webrtc/RTCIdentityProviderRegistrar.h
@@ -0,0 +1,59 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef RTCIDENTITYPROVIDER_H_
+#define RTCIDENTITYPROVIDER_H_
+
+#include "mozilla/RefPtr.h"
+#include "nsCOMPtr.h"
+#include "nsISupportsImpl.h"
+#include "nsIGlobalObject.h"
+#include "nsWrapperCache.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/dom/Promise.h"
+#include "mozilla/dom/BindingDeclarations.h"
+#include "mozilla/dom/RTCIdentityProviderBinding.h"
+
+namespace mozilla {
+namespace dom {
+
+struct RTCIdentityProvider;
+
+// Holds the generate/validate assertion callbacks that an identity
+// provider script registers, and proxies calls through to them.
+class RTCIdentityProviderRegistrar final : public nsISupports,
+                                           public nsWrapperCache
+{
+public:
+  NS_DECL_CYCLE_COLLECTING_ISUPPORTS
+  NS_DECL_CYCLE_COLLECTION_SCRIPT_HOLDER_CLASS(RTCIdentityProviderRegistrar)
+
+  explicit RTCIdentityProviderRegistrar(nsIGlobalObject* aGlobal);
+
+  // As required
+  nsIGlobalObject* GetParentObject() const;
+  virtual JSObject* WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) override;
+
+  // setter and checker
+  void Register(const RTCIdentityProvider& aIdp);
+  bool HasIdp() const;
+
+  // Both throw NS_ERROR_NOT_INITIALIZED when no IdP has been registered.
+  // NOTE(review): parameter names below (assertion/origin) don't follow the
+  // aFoo convention used elsewhere in this header.
+  already_AddRefed<Promise>
+  GenerateAssertion(const nsAString& aContents, const nsAString& aOrigin,
+                    const Optional<nsAString>& aUsernameHint, ErrorResult& aRv);
+  already_AddRefed<Promise>
+  ValidateAssertion(const nsAString& assertion, const nsAString& origin,
+                    ErrorResult& aRv);
+
+private:
+  ~RTCIdentityProviderRegistrar();
+
+  nsCOMPtr<nsIGlobalObject> mGlobal;
+  RefPtr<GenerateAssertionCallback> mGenerateAssertionCallback;
+  RefPtr<ValidateAssertionCallback> mValidateAssertionCallback;
+};
+
+} // namespace dom
+} // namespace mozilla
+
+#endif /* RTCIDENTITYPROVIDER_H_ */
diff --git a/dom/media/webrtc/WebrtcGlobal.h b/dom/media/webrtc/WebrtcGlobal.h
new file mode 100644
index 000000000..8ab10cb0d
--- /dev/null
+++ b/dom/media/webrtc/WebrtcGlobal.h
@@ -0,0 +1,497 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _WEBRTC_GLOBAL_H_
+#define _WEBRTC_GLOBAL_H_
+
+#include "ipc/IPCMessageUtils.h"
+#include "mozilla/dom/BindingDeclarations.h"
+#include "mozilla/dom/RTCStatsReportBinding.h"
+#include "nsAutoPtr.h"
+
+typedef mozilla::dom::RTCStatsReportInternal StatsReport;
+typedef nsTArray< nsAutoPtr<StatsReport>> RTCReports;
+typedef mozilla::dom::Sequence<nsString> WebrtcGlobalLog;
+
+namespace IPC {
+
+// IPC serialization for dom::Optional<T>.
+// Wire format: bool "was passed" flag, followed by the value iff true.
+template<typename T>
+struct ParamTraits<mozilla::dom::Optional<T>>
+{
+  typedef mozilla::dom::Optional<T> paramType;
+
+  static void Write(Message* aMsg, const paramType& aParam)
+  {
+    if (aParam.WasPassed()) {
+      WriteParam(aMsg, true);
+      WriteParam(aMsg, aParam.Value());
+      return;
+    }
+
+    WriteParam(aMsg, false);
+  }
+
+  static bool Read(const Message* aMsg, PickleIterator* aIter, paramType* aResult)
+  {
+    bool was_passed = false;
+
+    if (!ReadParam(aMsg, aIter, &was_passed)) {
+      return false;
+    }
+
+    aResult->Reset(); //XXX Optional_base seems to reach this point with isSome true.
+
+    if (was_passed) {
+      if (!ReadParam(aMsg, aIter, &(aResult->Construct()))) {
+        return false;
+      }
+    }
+
+    return true;
+  }
+};
+
+// IPC serialization for dom::Sequence<T>, delegating to the traits of its
+// FallibleTArray<T> base class.
+template<typename T>
+struct ParamTraits<mozilla::dom::Sequence<T>>
+{
+  typedef mozilla::dom::Sequence<T> paramType;
+
+  static void Write(Message* aMsg, const paramType& aParam)
+  {
+    WriteParam(aMsg, static_cast<const FallibleTArray<T>&>(aParam));
+  }
+
+  static bool Read(const Message* aMsg, PickleIterator* aIter, paramType* aResult)
+  {
+    // NOTE(review): this is an upcast, so static_cast (as used in Write)
+    // would express intent better than dynamic_cast — confirm and align.
+    return ReadParam(aMsg, aIter, dynamic_cast<FallibleTArray<T>*>(aResult));
+  }
+};
+
+// Enum serializers: each validates values against the [first, EndGuard_)
+// range so a corrupt message cannot smuggle an out-of-range enum value.
+template<>
+struct ParamTraits<mozilla::dom::RTCStatsType> :
+  public ContiguousEnumSerializer<
+    mozilla::dom::RTCStatsType,
+    mozilla::dom::RTCStatsType::Inboundrtp,
+    mozilla::dom::RTCStatsType::EndGuard_>
+{};
+
+template<>
+struct ParamTraits<mozilla::dom::RTCStatsIceCandidatePairState> :
+  public ContiguousEnumSerializer<
+    mozilla::dom::RTCStatsIceCandidatePairState,
+    mozilla::dom::RTCStatsIceCandidatePairState::Frozen,
+    mozilla::dom::RTCStatsIceCandidatePairState::EndGuard_>
+{};
+
+template<>
+struct ParamTraits<mozilla::dom::RTCStatsIceCandidateType> :
+  public ContiguousEnumSerializer<
+    mozilla::dom::RTCStatsIceCandidateType,
+    mozilla::dom::RTCStatsIceCandidateType::Host,
+    mozilla::dom::RTCStatsIceCandidateType::EndGuard_>
+{};
+
+// IPC serialization for the whole internal stats report. Fields are written
+// and read in the same fixed (alphabetical) order — Write and Read MUST stay
+// in sync or deserialization will silently mis-assign fields.
+template<>
+struct ParamTraits<mozilla::dom::RTCStatsReportInternal>
+{
+  typedef mozilla::dom::RTCStatsReportInternal paramType;
+
+  static void Write(Message* aMsg, const paramType& aParam)
+  {
+    WriteParam(aMsg, aParam.mClosed);
+    WriteParam(aMsg, aParam.mCodecStats);
+    WriteParam(aMsg, aParam.mIceCandidatePairStats);
+    WriteParam(aMsg, aParam.mIceCandidateStats);
+    WriteParam(aMsg, aParam.mIceComponentStats);
+    WriteParam(aMsg, aParam.mInboundRTPStreamStats);
+    WriteParam(aMsg, aParam.mLocalSdp);
+    WriteParam(aMsg, aParam.mMediaStreamStats);
+    WriteParam(aMsg, aParam.mMediaStreamTrackStats);
+    WriteParam(aMsg, aParam.mOutboundRTPStreamStats);
+    WriteParam(aMsg, aParam.mPcid);
+    WriteParam(aMsg, aParam.mRemoteSdp);
+    WriteParam(aMsg, aParam.mTimestamp);
+    WriteParam(aMsg, aParam.mTransportStats);
+  }
+
+  static bool Read(const Message* aMsg, PickleIterator* aIter, paramType* aResult)
+  {
+    if (!ReadParam(aMsg, aIter, &(aResult->mClosed)) ||
+        !ReadParam(aMsg, aIter, &(aResult->mCodecStats)) ||
+        !ReadParam(aMsg, aIter, &(aResult->mIceCandidatePairStats)) ||
+        !ReadParam(aMsg, aIter, &(aResult->mIceCandidateStats)) ||
+        !ReadParam(aMsg, aIter, &(aResult->mIceComponentStats)) ||
+        !ReadParam(aMsg, aIter, &(aResult->mInboundRTPStreamStats)) ||
+        !ReadParam(aMsg, aIter, &(aResult->mLocalSdp)) ||
+        !ReadParam(aMsg, aIter, &(aResult->mMediaStreamStats)) ||
+        !ReadParam(aMsg, aIter, &(aResult->mMediaStreamTrackStats)) ||
+        !ReadParam(aMsg, aIter, &(aResult->mOutboundRTPStreamStats)) ||
+        !ReadParam(aMsg, aIter, &(aResult->mPcid)) ||
+        !ReadParam(aMsg, aIter, &(aResult->mRemoteSdp)) ||
+        !ReadParam(aMsg, aIter, &(aResult->mTimestamp)) ||
+        !ReadParam(aMsg, aIter, &(aResult->mTransportStats))) {
+      return false;
+    }
+
+    return true;
+  }
+};
+
+typedef mozilla::dom::RTCStats RTCStats;
+
+// Shared helpers for the RTCStats base-dictionary fields; every derived
+// stats ParamTraits below calls these after its own fields.
+static void WriteRTCStats(Message* aMsg, const RTCStats& aParam)
+{
+  // RTCStats base class
+  WriteParam(aMsg, aParam.mId);
+  WriteParam(aMsg, aParam.mTimestamp);
+  WriteParam(aMsg, aParam.mType);
+}
+
+static bool ReadRTCStats(const Message* aMsg, PickleIterator* aIter, RTCStats* aResult)
+{
+  // RTCStats base class
+  if (!ReadParam(aMsg, aIter, &(aResult->mId)) ||
+      !ReadParam(aMsg, aIter, &(aResult->mTimestamp)) ||
+      !ReadParam(aMsg, aIter, &(aResult->mType))) {
+    return false;
+  }
+
+  return true;
+}
+
+// IPC serialization for RTCCodecStats; derived fields first, then the
+// RTCStats base fields. Read must mirror Write exactly.
+template<>
+struct ParamTraits<mozilla::dom::RTCCodecStats>
+{
+  typedef mozilla::dom::RTCCodecStats paramType;
+
+  static void Write(Message* aMsg, const paramType& aParam)
+  {
+    WriteParam(aMsg, aParam.mChannels);
+    WriteParam(aMsg, aParam.mClockRate);
+    WriteParam(aMsg, aParam.mCodec);
+    WriteParam(aMsg, aParam.mParameters);
+    WriteParam(aMsg, aParam.mPayloadType);
+    WriteRTCStats(aMsg, aParam);
+  }
+
+  static bool Read(const Message* aMsg, PickleIterator* aIter, paramType* aResult)
+  {
+    if (!ReadParam(aMsg, aIter, &(aResult->mChannels)) ||
+        !ReadParam(aMsg, aIter, &(aResult->mClockRate)) ||
+        !ReadParam(aMsg, aIter, &(aResult->mCodec)) ||
+        !ReadParam(aMsg, aIter, &(aResult->mParameters)) ||
+        !ReadParam(aMsg, aIter, &(aResult->mPayloadType)) ||
+        !ReadRTCStats(aMsg, aIter, aResult)) {
+      return false;
+    }
+
+    return true;
+  }
+};
+
+// IPC serialization for RTCIceCandidatePairStats; derived fields first,
+// then the RTCStats base fields. Read must mirror Write exactly.
+template<>
+struct ParamTraits<mozilla::dom::RTCIceCandidatePairStats>
+{
+  typedef mozilla::dom::RTCIceCandidatePairStats paramType;
+
+  static void Write(Message* aMsg, const paramType& aParam)
+  {
+    WriteParam(aMsg, aParam.mComponentId);
+    WriteParam(aMsg, aParam.mLocalCandidateId);
+    WriteParam(aMsg, aParam.mPriority);
+    WriteParam(aMsg, aParam.mNominated);
+    WriteParam(aMsg, aParam.mReadable);
+    WriteParam(aMsg, aParam.mRemoteCandidateId);
+    WriteParam(aMsg, aParam.mSelected);
+    WriteParam(aMsg, aParam.mState);
+    WriteRTCStats(aMsg, aParam);
+  }
+
+  static bool Read(const Message* aMsg, PickleIterator* aIter, paramType* aResult)
+  {
+    if (!ReadParam(aMsg, aIter, &(aResult->mComponentId)) ||
+        !ReadParam(aMsg, aIter, &(aResult->mLocalCandidateId)) ||
+        !ReadParam(aMsg, aIter, &(aResult->mPriority)) ||
+        !ReadParam(aMsg, aIter, &(aResult->mNominated)) ||
+        !ReadParam(aMsg, aIter, &(aResult->mReadable)) ||
+        !ReadParam(aMsg, aIter, &(aResult->mRemoteCandidateId)) ||
+        !ReadParam(aMsg, aIter, &(aResult->mSelected)) ||
+        !ReadParam(aMsg, aIter, &(aResult->mState)) ||
+        !ReadRTCStats(aMsg, aIter, aResult)) {
+      return false;
+    }
+
+    return true;
+  }
+};
+
+// IPC serialization for RTCIceCandidateStats; derived fields first, then
+// the RTCStats base fields. Read must mirror Write exactly.
+template<>
+struct ParamTraits<mozilla::dom::RTCIceCandidateStats>
+{
+  typedef mozilla::dom::RTCIceCandidateStats paramType;
+
+  static void Write(Message* aMsg, const paramType& aParam)
+  {
+    WriteParam(aMsg, aParam.mCandidateId);
+    WriteParam(aMsg, aParam.mCandidateType);
+    WriteParam(aMsg, aParam.mComponentId);
+    WriteParam(aMsg, aParam.mIpAddress);
+    WriteParam(aMsg, aParam.mMozLocalTransport);
+    WriteParam(aMsg, aParam.mPortNumber);
+    WriteParam(aMsg, aParam.mTransport);
+    WriteRTCStats(aMsg, aParam);
+  }
+
+  static bool Read(const Message* aMsg, PickleIterator* aIter, paramType* aResult)
+  {
+    if (!ReadParam(aMsg, aIter, &(aResult->mCandidateId)) ||
+        !ReadParam(aMsg, aIter, &(aResult->mCandidateType)) ||
+        !ReadParam(aMsg, aIter, &(aResult->mComponentId)) ||
+        !ReadParam(aMsg, aIter, &(aResult->mIpAddress)) ||
+        !ReadParam(aMsg, aIter, &(aResult->mMozLocalTransport)) ||
+        !ReadParam(aMsg, aIter, &(aResult->mPortNumber)) ||
+        !ReadParam(aMsg, aIter, &(aResult->mTransport)) ||
+        !ReadRTCStats(aMsg, aIter, aResult)) {
+      return false;
+    }
+
+    return true;
+  }
+};
+
+// IPC serialization for RTCIceComponentStats; derived fields first, then
+// the RTCStats base fields. Read must mirror Write exactly.
+template<>
+struct ParamTraits<mozilla::dom::RTCIceComponentStats>
+{
+  typedef mozilla::dom::RTCIceComponentStats paramType;
+
+  static void Write(Message* aMsg, const paramType& aParam)
+  {
+    WriteParam(aMsg, aParam.mActiveConnection);
+    WriteParam(aMsg, aParam.mBytesReceived);
+    WriteParam(aMsg, aParam.mBytesSent);
+    WriteParam(aMsg, aParam.mComponent);
+    WriteParam(aMsg, aParam.mTransportId);
+    WriteRTCStats(aMsg, aParam);
+  }
+
+  static bool Read(const Message* aMsg, PickleIterator* aIter, paramType* aResult)
+  {
+    if (!ReadParam(aMsg, aIter, &(aResult->mActiveConnection)) ||
+        !ReadParam(aMsg, aIter, &(aResult->mBytesReceived)) ||
+        !ReadParam(aMsg, aIter, &(aResult->mBytesSent)) ||
+        !ReadParam(aMsg, aIter, &(aResult->mComponent)) ||
+        !ReadParam(aMsg, aIter, &(aResult->mTransportId)) ||
+        !ReadRTCStats(aMsg, aIter, aResult)) {
+      return false;
+    }
+
+    return true;
+  }
+};
+
+// Shared helpers for the RTCRTPStreamStats intermediate-dictionary fields;
+// used by both the inbound and outbound stream stats ParamTraits below.
+static void WriteRTCRTPStreamStats(
+    Message* aMsg,
+    const mozilla::dom::RTCRTPStreamStats& aParam)
+{
+  WriteParam(aMsg, aParam.mBitrateMean);
+  WriteParam(aMsg, aParam.mBitrateStdDev);
+  WriteParam(aMsg, aParam.mCodecId);
+  WriteParam(aMsg, aParam.mFramerateMean);
+  WriteParam(aMsg, aParam.mFramerateStdDev);
+  WriteParam(aMsg, aParam.mIsRemote);
+  WriteParam(aMsg, aParam.mMediaTrackId);
+  WriteParam(aMsg, aParam.mMediaType);
+  WriteParam(aMsg, aParam.mRemoteId);
+  WriteParam(aMsg, aParam.mSsrc);
+  WriteParam(aMsg, aParam.mTransportId);
+}
+
+static bool ReadRTCRTPStreamStats(
+    const Message* aMsg, PickleIterator* aIter,
+    mozilla::dom::RTCRTPStreamStats* aResult)
+{
+  if (!ReadParam(aMsg, aIter, &(aResult->mBitrateMean)) ||
+      !ReadParam(aMsg, aIter, &(aResult->mBitrateStdDev)) ||
+      !ReadParam(aMsg, aIter, &(aResult->mCodecId)) ||
+      !ReadParam(aMsg, aIter, &(aResult->mFramerateMean)) ||
+      !ReadParam(aMsg, aIter, &(aResult->mFramerateStdDev)) ||
+      !ReadParam(aMsg, aIter, &(aResult->mIsRemote)) ||
+      !ReadParam(aMsg, aIter, &(aResult->mMediaTrackId)) ||
+      !ReadParam(aMsg, aIter, &(aResult->mMediaType)) ||
+      !ReadParam(aMsg, aIter, &(aResult->mRemoteId)) ||
+      !ReadParam(aMsg, aIter, &(aResult->mSsrc)) ||
+      !ReadParam(aMsg, aIter, &(aResult->mTransportId))) {
+    return false;
+  }
+
+  return true;
+}
+
+template<>
+struct ParamTraits<mozilla::dom::RTCInboundRTPStreamStats>
+{
+ typedef mozilla::dom::RTCInboundRTPStreamStats paramType;
+
+ static void Write(Message* aMsg, const paramType& aParam)
+ {
+ WriteParam(aMsg, aParam.mBytesReceived);
+ WriteParam(aMsg, aParam.mDiscardedPackets);
+ WriteParam(aMsg, aParam.mJitter);
+ WriteParam(aMsg, aParam.mMozAvSyncDelay);
+ WriteParam(aMsg, aParam.mMozJitterBufferDelay);
+ WriteParam(aMsg, aParam.mMozRtt);
+ WriteParam(aMsg, aParam.mPacketsLost);
+ WriteParam(aMsg, aParam.mPacketsReceived);
+ WriteRTCRTPStreamStats(aMsg, aParam);
+ WriteRTCStats(aMsg, aParam);
+ }
+
+ static bool Read(const Message* aMsg, PickleIterator* aIter, paramType* aResult)
+ {
+ if (!ReadParam(aMsg, aIter, &(aResult->mBytesReceived)) ||
+ !ReadParam(aMsg, aIter, &(aResult->mDiscardedPackets)) ||
+ !ReadParam(aMsg, aIter, &(aResult->mJitter)) ||
+ !ReadParam(aMsg, aIter, &(aResult->mMozAvSyncDelay)) ||
+ !ReadParam(aMsg, aIter, &(aResult->mMozJitterBufferDelay)) ||
+ !ReadParam(aMsg, aIter, &(aResult->mMozRtt)) ||
+ !ReadParam(aMsg, aIter, &(aResult->mPacketsLost)) ||
+ !ReadParam(aMsg, aIter, &(aResult->mPacketsReceived)) ||
+ !ReadRTCRTPStreamStats(aMsg, aIter, aResult) ||
+ !ReadRTCStats(aMsg, aIter, aResult)) {
+ return false;
+ }
+
+ return true;
+ }
+};
+
+template<>
+struct ParamTraits<mozilla::dom::RTCOutboundRTPStreamStats>
+{
+ typedef mozilla::dom::RTCOutboundRTPStreamStats paramType;
+
+ static void Write(Message* aMsg, const paramType& aParam)
+ {
+ WriteParam(aMsg, aParam.mBytesSent);
+ WriteParam(aMsg, aParam.mDroppedFrames);
+ WriteParam(aMsg, aParam.mPacketsSent);
+ WriteParam(aMsg, aParam.mTargetBitrate);
+ WriteRTCRTPStreamStats(aMsg, aParam);
+ WriteRTCStats(aMsg, aParam);
+ }
+
+ static bool Read(const Message* aMsg, PickleIterator* aIter, paramType* aResult)
+ {
+ if (!ReadParam(aMsg, aIter, &(aResult->mBytesSent)) ||
+ !ReadParam(aMsg, aIter, &(aResult->mDroppedFrames)) ||
+ !ReadParam(aMsg, aIter, &(aResult->mPacketsSent)) ||
+ !ReadParam(aMsg, aIter, &(aResult->mTargetBitrate)) ||
+ !ReadRTCRTPStreamStats(aMsg, aIter, aResult) ||
+ !ReadRTCStats(aMsg, aIter, aResult)) {
+ return false;
+ }
+
+ return true;
+ }
+};
+
+template<>
+struct ParamTraits<mozilla::dom::RTCMediaStreamStats>
+{
+ typedef mozilla::dom::RTCMediaStreamStats paramType;
+
+ static void Write(Message* aMsg, const paramType& aParam)
+ {
+ WriteParam(aMsg, aParam.mStreamIdentifier);
+ WriteParam(aMsg, aParam.mTrackIds);
+ WriteRTCStats(aMsg, aParam);
+ }
+
+ static bool Read(const Message* aMsg, PickleIterator* aIter, paramType* aResult)
+ {
+ if (!ReadParam(aMsg, aIter, &(aResult->mStreamIdentifier)) ||
+ !ReadParam(aMsg, aIter, &(aResult->mTrackIds)) ||
+ !ReadRTCStats(aMsg, aIter, aResult)) {
+ return false;
+ }
+
+ return true;
+ }
+};
+
+template<>
+struct ParamTraits<mozilla::dom::RTCTransportStats>
+{
+ typedef mozilla::dom::RTCTransportStats paramType;
+
+ static void Write(Message* aMsg, const paramType& aParam)
+ {
+ WriteParam(aMsg, aParam.mBytesReceived);
+ WriteParam(aMsg, aParam.mBytesSent);
+ WriteRTCStats(aMsg, aParam);
+ }
+
+ static bool Read(const Message* aMsg, PickleIterator* aIter, paramType* aResult)
+ {
+ if (!ReadParam(aMsg, aIter, &(aResult->mBytesReceived)) ||
+ !ReadParam(aMsg, aIter, &(aResult->mBytesSent)) ||
+ !ReadRTCStats(aMsg, aIter, aResult)) {
+ return false;
+ }
+
+ return true;
+ }
+};
+
+template<>
+struct ParamTraits<mozilla::dom::RTCMediaStreamTrackStats>
+{
+ typedef mozilla::dom::RTCMediaStreamTrackStats paramType;
+
+ static void Write(Message* aMsg, const paramType& aParam)
+ {
+ WriteParam(aMsg, aParam.mAudioLevel);
+ WriteParam(aMsg, aParam.mEchoReturnLoss);
+ WriteParam(aMsg, aParam.mEchoReturnLossEnhancement);
+ WriteParam(aMsg, aParam.mFrameHeight);
+ WriteParam(aMsg, aParam.mFrameWidth);
+ WriteParam(aMsg, aParam.mFramesCorrupted);
+ WriteParam(aMsg, aParam.mFramesDecoded);
+ WriteParam(aMsg, aParam.mFramesDropped);
+ WriteParam(aMsg, aParam.mFramesPerSecond);
+ WriteParam(aMsg, aParam.mFramesReceived);
+ WriteParam(aMsg, aParam.mFramesSent);
+ WriteParam(aMsg, aParam.mRemoteSource);
+ WriteParam(aMsg, aParam.mSsrcIds);
+ WriteParam(aMsg, aParam.mTrackIdentifier);
+ WriteRTCStats(aMsg, aParam);
+ }
+
+ static bool Read(const Message* aMsg, PickleIterator* aIter, paramType* aResult)
+ {
+ if (!ReadParam(aMsg, aIter, &(aResult->mAudioLevel)) ||
+ !ReadParam(aMsg, aIter, &(aResult->mEchoReturnLoss)) ||
+ !ReadParam(aMsg, aIter, &(aResult->mEchoReturnLossEnhancement)) ||
+ !ReadParam(aMsg, aIter, &(aResult->mFrameHeight)) ||
+ !ReadParam(aMsg, aIter, &(aResult->mFrameWidth)) ||
+ !ReadParam(aMsg, aIter, &(aResult->mFramesCorrupted)) ||
+ !ReadParam(aMsg, aIter, &(aResult->mFramesDecoded)) ||
+ !ReadParam(aMsg, aIter, &(aResult->mFramesDropped)) ||
+ !ReadParam(aMsg, aIter, &(aResult->mFramesPerSecond)) ||
+ !ReadParam(aMsg, aIter, &(aResult->mFramesReceived)) ||
+ !ReadParam(aMsg, aIter, &(aResult->mFramesSent)) ||
+ !ReadParam(aMsg, aIter, &(aResult->mRemoteSource)) ||
+ !ReadParam(aMsg, aIter, &(aResult->mSsrcIds)) ||
+ !ReadParam(aMsg, aIter, &(aResult->mTrackIdentifier)) ||
+ !ReadRTCStats(aMsg, aIter, aResult)) {
+ return false;
+ }
+
+ return true;
+ }
+};
+
+} // namespace ipc
+
+#endif // _WEBRTC_GLOBAL_H_
diff --git a/dom/media/webrtc/moz.build b/dom/media/webrtc/moz.build
new file mode 100644
index 000000000..66def8719
--- /dev/null
+++ b/dom/media/webrtc/moz.build
@@ -0,0 +1,88 @@
# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
# vim: set filetype=python:
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

# Build definition for dom/media/webrtc.  All names used here (CONFIG,
# DEFINES, EXPORTS, UNIFIED_SOURCES, ...) are provided by the mozbuild
# sandbox, not Python builtins.

# Bugzilla component routing for files in this directory.
with Files('*'):
    BUG_COMPONENT = ('Core', 'WebRTC: Audio/Video')

# PeerIdentity belongs to the signaling component instead.
with Files('PeerIdentity.*'):
    BUG_COMPONENT = ('Core', 'WebRTC: Signaling')

XPIDL_MODULE = 'content_webrtc'

# Media-engine headers exported unconditionally (used even without WebRTC).
EXPORTS += [
    'MediaEngine.h',
    'MediaEngineCameraVideoSource.h',
    'MediaEngineDefault.h',
    'MediaTrackConstraints.h',
]

if CONFIG['MOZ_WEBRTC']:
    # Platform define consumed by the webrtc.org sources.
    if CONFIG['OS_TARGET'] == 'WINNT':
        DEFINES['WEBRTC_WIN'] = True
    else:
        DEFINES['WEBRTC_POSIX'] = True
    EXPORTS += ['AudioOutputObserver.h',
                'MediaEngineRemoteVideoSource.h',
                'MediaEngineWebRTC.h']
    EXPORTS.mozilla.dom += [ 'RTCIdentityProviderRegistrar.h' ]
    UNIFIED_SOURCES += [
        'MediaEngineCameraVideoSource.cpp',
        'MediaEngineRemoteVideoSource.cpp',
        'MediaEngineTabVideoSource.cpp',
        'MediaEngineWebRTCAudio.cpp',
        'RTCCertificate.cpp',
        'RTCIdentityProviderRegistrar.cpp',
    ]
    # MediaEngineWebRTC.cpp needs to be built separately.
    SOURCES += [
        'MediaEngineWebRTC.cpp',
    ]
    LOCAL_INCLUDES += [
        '/dom/base',
        '/media/libyuv/include',
        '/media/webrtc/signaling/src/common',
        '/media/webrtc/signaling/src/common/browser_logging',
        '/media/webrtc/trunk',
    ]

# Tab-capture interface, built regardless of MOZ_WEBRTC.
XPIDL_SOURCES += [
    'nsITabSource.idl'
]

UNIFIED_SOURCES += [
    'MediaEngineDefault.cpp',
    'MediaTrackConstraints.cpp',
    'PeerIdentity.cpp',
]

EXPORTS.mozilla += [
    'PeerIdentity.h',
]
EXPORTS.mozilla.dom += [
    'RTCCertificate.h',
]

include('/ipc/chromium/chromium-config.mozbuild')

# Suppress some GCC/clang warnings being treated as errors:
# - about attributes on forward declarations for types that are already
#   defined, which complains about important MOZ_EXPORT attributes for
#   android API types
if CONFIG['GNU_CC'] or CONFIG['CLANG_CL']:
    CXXFLAGS += [
        '-Wno-error=attributes',
        '-Wno-error=shadow',
    ]

FINAL_LIBRARY = 'xul'

# MSVC-specific warning suppressions and a __PRETTY_FUNCTION__ shim
# (GCC extension mapped to MSVC's __FUNCSIG__).
if CONFIG['_MSC_VER']:
    CXXFLAGS += [
        '-wd4275', # non dll-interface class used as base for dll-interface class
        '-wd4312', # This is intended as a temporary hack to support building with VS2015
                   # 'reinterpret_cast': conversion from 'DWORD' to 'HANDLE' of greater size
    ]
    DEFINES['__PRETTY_FUNCTION__'] = '__FUNCSIG__'
diff --git a/dom/media/webrtc/nsITabSource.idl b/dom/media/webrtc/nsITabSource.idl
new file mode 100644
index 000000000..c44d51208
--- /dev/null
+++ b/dom/media/webrtc/nsITabSource.idl
@@ -0,0 +1,20 @@
/* -*- Mode: IDL; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
 * You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "nsISupports.idl"

interface mozIDOMWindowProxy;

/**
 * Supplies the tab (window) to be captured for a tab-sharing media stream,
 * and receives start/stop notifications for that stream.  Implementations
 * are registered under NS_TABSOURCESERVICE_CONTRACTID below.
 */
[scriptable,uuid(0feba7f2-800d-4fe5-b28d-e3f17a7a7322)]
interface nsITabSource : nsISupports
{
  /* Returns the window proxy of the tab whose content should be streamed. */
  mozIDOMWindowProxy getTabToStream();
  /* Called when streaming of |window| begins. */
  void notifyStreamStart(in mozIDOMWindowProxy window);
  /* Called when streaming of |window| ends. */
  void notifyStreamStop(in mozIDOMWindowProxy window);
};

%{C++
#define NS_TABSOURCESERVICE_CONTRACTID "@mozilla.org/tab-source-service;1"
%}