author | Matt A. Tobin <mattatobin@localhost.localdomain> | 2018-02-02 04:16:08 -0500
---|---|---
committer | Matt A. Tobin <mattatobin@localhost.localdomain> | 2018-02-02 04:16:08 -0500
commit | 5f8de423f190bbb79a62f804151bc24824fa32d8 (patch) |
tree | 10027f336435511475e392454359edea8e25895d | /media/webrtc/signaling/src/media-conduit
parent | 49ee0794b5d912db1f95dce6eb52d781dc210db5 (diff) |
Add m-esr52 at 52.6.0
Diffstat (limited to 'media/webrtc/signaling/src/media-conduit')
23 files changed, 9260 insertions, 0 deletions
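Before the file-by-file diff, a brief orientation may help. The sketch below shows how a caller might drive the AudioSessionConduit API this patch introduces: create the conduit, register a transport, configure a send codec, then insert 10 ms audio frames. It is an illustrative sketch only, not code from the patch: `MyTransport` is a hypothetical stand-in for a real TransportInterface implementation (its method signatures are inferred from the SendRtpPacket/SendRtcpPacket call sites in AudioConduit.cpp below), and the Opus payload type and frame sizes are example values.

```cpp
// Illustrative sketch only -- not part of this patch. Assumes the headers
// added here (MediaConduitInterface.h, CodecConfig.h) are on the include path.
#include "MediaConduitInterface.h"
#include "CodecConfig.h"

using namespace mozilla;

// Hypothetical transport sink; a real one would hand the packets to the
// networking/SRTP layer. Signatures inferred from AudioConduit.cpp usage.
class MyTransport : public TransportInterface {
public:
  nsresult SendRtpPacket(const void* data, size_t len) override { return NS_OK; }
  nsresult SendRtcpPacket(const void* data, size_t len) override { return NS_OK; }
};

static MediaConduitErrorCode SendOneFrame()
{
  // Create() returns nullptr if VoiceEngine setup fails (main thread only).
  RefPtr<AudioSessionConduit> conduit = AudioSessionConduit::Create();
  if (!conduit) {
    return kMediaConduitSessionNotInited;
  }

  // RTP produced by the engine is handed to this transport (see SendPacket below).
  conduit->SetTransmitterTransport(new MyTransport());

  // Example values only: payload type 109, mono Opus, 48 kHz, 20 ms packets.
  AudioCodecConfig opus(109, "opus", 48000, 960, 1, 0, false);
  MediaConduitErrorCode rv = conduit->ConfigureSendMediaCodec(&opus);
  if (rv != kMediaConduitNoError) {
    return rv;
  }

  // One 10 ms frame at 48 kHz = 480 samples; SendAudioFrame requires
  // lengthSamples % (samplingFreqHz / 100) == 0.
  int16_t samples[480] = {0};
  return conduit->SendAudioFrame(samples, 480, 48000, 0);
}
```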
diff --git a/media/webrtc/signaling/src/media-conduit/AudioConduit.cpp b/media/webrtc/signaling/src/media-conduit/AudioConduit.cpp new file mode 100755 index 000000000..2c57431e7 --- /dev/null +++ b/media/webrtc/signaling/src/media-conduit/AudioConduit.cpp @@ -0,0 +1,1134 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "CSFLog.h" +#include "nspr.h" + +#ifdef HAVE_NETINET_IN_H +#include <netinet/in.h> +#elif defined XP_WIN +#include <winsock2.h> +#endif + +#include "AudioConduit.h" +#include "nsCOMPtr.h" +#include "mozilla/Services.h" +#include "nsServiceManagerUtils.h" +#include "nsIPrefService.h" +#include "nsIPrefBranch.h" +#include "nsThreadUtils.h" +#if !defined(MOZILLA_EXTERNAL_LINKAGE) +#include "Latency.h" +#include "mozilla/Telemetry.h" +#endif + +#include "webrtc/common.h" +#include "webrtc/modules/audio_processing/include/audio_processing.h" +#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h" +#include "webrtc/voice_engine/include/voe_dtmf.h" +#include "webrtc/voice_engine/include/voe_errors.h" +#include "webrtc/voice_engine/voice_engine_impl.h" +#include "webrtc/system_wrappers/interface/clock.h" + +#ifdef MOZ_WIDGET_ANDROID +#include "AndroidJNIWrapper.h" +#endif + +namespace mozilla { + +static const char* logTag ="WebrtcAudioSessionConduit"; + +// 32 bytes is what WebRTC CodecInst expects +const unsigned int WebrtcAudioConduit::CODEC_PLNAME_SIZE = 32; + +/** + * Factory Method for AudioConduit + */ +RefPtr<AudioSessionConduit> AudioSessionConduit::Create() +{ + CSFLogDebug(logTag, "%s ", __FUNCTION__); + NS_ASSERTION(NS_IsMainThread(), "Only call on main thread"); + + WebrtcAudioConduit* obj = new WebrtcAudioConduit(); + if(obj->Init() != kMediaConduitNoError) + { + CSFLogError(logTag, "%s AudioConduit Init Failed ", __FUNCTION__); + delete obj; + return nullptr; + } + CSFLogDebug(logTag, "%s Successfully created AudioConduit ", __FUNCTION__); + return obj; +} + +/** + * Destruction defines for our super-classes + */ +WebrtcAudioConduit::~WebrtcAudioConduit() +{ + NS_ASSERTION(NS_IsMainThread(), "Only call on main thread"); + + CSFLogDebug(logTag, "%s ", __FUNCTION__); + for(std::vector<AudioCodecConfig*>::size_type i=0;i < mRecvCodecList.size();i++) + { + delete mRecvCodecList[i]; + } + + // The first one of a pair to be deleted shuts down media for both + if(mPtrVoEXmedia) + { + mPtrVoEXmedia->SetExternalRecordingStatus(false); + mPtrVoEXmedia->SetExternalPlayoutStatus(false); + } + + //Deal with the transport + if(mPtrVoENetwork) + { + mPtrVoENetwork->DeRegisterExternalTransport(mChannel); + } + + if(mPtrVoEBase) + { + mPtrVoEBase->StopPlayout(mChannel); + mPtrVoEBase->StopSend(mChannel); + mPtrVoEBase->StopReceive(mChannel); + mPtrVoEBase->DeleteChannel(mChannel); + mPtrVoEBase->Terminate(); + } + + // We shouldn't delete the VoiceEngine until all these are released! 
+ // And we can't use a Scoped ptr, since the order is arbitrary + mPtrVoENetwork = nullptr; + mPtrVoEBase = nullptr; + mPtrVoECodec = nullptr; + mPtrVoEXmedia = nullptr; + mPtrVoEProcessing = nullptr; + mPtrVoEVideoSync = nullptr; + mPtrVoERTP_RTCP = nullptr; + mPtrRTP = nullptr; + + if(mVoiceEngine) + { + webrtc::VoiceEngine::Delete(mVoiceEngine); + } +} + +bool WebrtcAudioConduit::SetLocalSSRC(unsigned int ssrc) +{ + unsigned int oldSsrc; + if (!GetLocalSSRC(&oldSsrc)) { + MOZ_ASSERT(false, "GetLocalSSRC failed"); + return false; + } + + if (oldSsrc == ssrc) { + return true; + } + + bool wasTransmitting = mEngineTransmitting; + if (StopTransmitting() != kMediaConduitNoError) { + return false; + } + + if (mPtrRTP->SetLocalSSRC(mChannel, ssrc)) { + return false; + } + + if (wasTransmitting) { + if (StartTransmitting() != kMediaConduitNoError) { + return false; + } + } + return true; +} + +bool WebrtcAudioConduit::GetLocalSSRC(unsigned int* ssrc) { + return !mPtrRTP->GetLocalSSRC(mChannel, *ssrc); +} + +bool WebrtcAudioConduit::GetRemoteSSRC(unsigned int* ssrc) { + return !mPtrRTP->GetRemoteSSRC(mChannel, *ssrc); +} + +bool WebrtcAudioConduit::SetLocalCNAME(const char* cname) +{ + char temp[256]; + strncpy(temp, cname, sizeof(temp) - 1); + temp[sizeof(temp) - 1] = 0; + return !mPtrRTP->SetRTCP_CNAME(mChannel, temp); +} + +bool WebrtcAudioConduit::GetAVStats(int32_t* jitterBufferDelayMs, + int32_t* playoutBufferDelayMs, + int32_t* avSyncOffsetMs) { + return !mPtrVoEVideoSync->GetDelayEstimate(mChannel, + jitterBufferDelayMs, + playoutBufferDelayMs, + avSyncOffsetMs); +} + +bool WebrtcAudioConduit::GetRTPStats(unsigned int* jitterMs, + unsigned int* cumulativeLost) { + unsigned int maxJitterMs = 0; + unsigned int discardedPackets; + *jitterMs = 0; + *cumulativeLost = 0; + return !mPtrRTP->GetRTPStatistics(mChannel, *jitterMs, maxJitterMs, + discardedPackets, *cumulativeLost); +} + +DOMHighResTimeStamp +NTPtoDOMHighResTimeStamp(uint32_t ntpHigh, uint32_t ntpLow) { + return (uint32_t(ntpHigh - webrtc::kNtpJan1970) + + double(ntpLow) / webrtc::kMagicNtpFractionalUnit) * 1000; +} + +bool WebrtcAudioConduit::GetRTCPReceiverReport(DOMHighResTimeStamp* timestamp, + uint32_t* jitterMs, + uint32_t* packetsReceived, + uint64_t* bytesReceived, + uint32_t* cumulativeLost, + int32_t* rttMs) { + uint32_t ntpHigh, ntpLow; + uint16_t fractionLost; + bool result = !mPtrRTP->GetRemoteRTCPReceiverInfo(mChannel, ntpHigh, ntpLow, + *packetsReceived, + *bytesReceived, + *jitterMs, + fractionLost, + *cumulativeLost, + *rttMs); + if (result) { + *timestamp = NTPtoDOMHighResTimeStamp(ntpHigh, ntpLow); + } + return result; +} + +bool WebrtcAudioConduit::GetRTCPSenderReport(DOMHighResTimeStamp* timestamp, + unsigned int* packetsSent, + uint64_t* bytesSent) { + webrtc::RTCPSenderInfo senderInfo; + webrtc::RtpRtcp * rtpRtcpModule; + webrtc::RtpReceiver * rtp_receiver; + bool result = + !mPtrVoEVideoSync->GetRtpRtcp(mChannel,&rtpRtcpModule,&rtp_receiver) && + !rtpRtcpModule->RemoteRTCPStat(&senderInfo); + if (result){ + *timestamp = NTPtoDOMHighResTimeStamp(senderInfo.NTPseconds, + senderInfo.NTPfraction); + *packetsSent = senderInfo.sendPacketCount; + *bytesSent = senderInfo.sendOctetCount; + } + return result; + } + +bool WebrtcAudioConduit::SetDtmfPayloadType(unsigned char type) { + CSFLogInfo(logTag, "%s : setting dtmf payload %d", __FUNCTION__, (int)type); + + ScopedCustomReleasePtr<webrtc::VoEDtmf> mPtrVoEDtmf; + mPtrVoEDtmf = webrtc::VoEDtmf::GetInterface(mVoiceEngine); + if (!mPtrVoEDtmf) { + CSFLogError(logTag, 
"%s Unable to initialize VoEDtmf", __FUNCTION__); + return false; + } + + int result = mPtrVoEDtmf->SetSendTelephoneEventPayloadType(mChannel, type); + if (result == -1) { + CSFLogError(logTag, "%s Failed call to SetSendTelephoneEventPayloadType", + __FUNCTION__); + } + return result != -1; +} + +bool WebrtcAudioConduit::InsertDTMFTone(int channel, int eventCode, + bool outOfBand, int lengthMs, + int attenuationDb) { + NS_ASSERTION(!NS_IsMainThread(), "Do not call on main thread"); + + if (!mVoiceEngine || !mDtmfEnabled) { + return false; + } + + webrtc::VoiceEngineImpl* s = static_cast<webrtc::VoiceEngineImpl*>(mVoiceEngine); + int result = s->SendTelephoneEvent(channel, eventCode, outOfBand, lengthMs, attenuationDb); + return result != -1; +} + +/* + * WebRTCAudioConduit Implementation + */ +MediaConduitErrorCode WebrtcAudioConduit::Init() +{ + CSFLogDebug(logTag, "%s this=%p", __FUNCTION__, this); + +#ifdef MOZ_WIDGET_ANDROID + jobject context = jsjni_GetGlobalContextRef(); + // get the JVM + JavaVM *jvm = jsjni_GetVM(); + + if (webrtc::VoiceEngine::SetAndroidObjects(jvm, (void*)context) != 0) { + CSFLogError(logTag, "%s Unable to set Android objects", __FUNCTION__); + return kMediaConduitSessionNotInited; + } +#endif + + // Per WebRTC APIs below function calls return nullptr on failure + if(!(mVoiceEngine = webrtc::VoiceEngine::Create())) + { + CSFLogError(logTag, "%s Unable to create voice engine", __FUNCTION__); + return kMediaConduitSessionNotInited; + } + + if(!(mPtrVoEBase = VoEBase::GetInterface(mVoiceEngine))) + { + CSFLogError(logTag, "%s Unable to initialize VoEBase", __FUNCTION__); + return kMediaConduitSessionNotInited; + } + + if(!(mPtrVoENetwork = VoENetwork::GetInterface(mVoiceEngine))) + { + CSFLogError(logTag, "%s Unable to initialize VoENetwork", __FUNCTION__); + return kMediaConduitSessionNotInited; + } + + if(!(mPtrVoECodec = VoECodec::GetInterface(mVoiceEngine))) + { + CSFLogError(logTag, "%s Unable to initialize VoEBCodec", __FUNCTION__); + return kMediaConduitSessionNotInited; + } + + if(!(mPtrVoEProcessing = VoEAudioProcessing::GetInterface(mVoiceEngine))) + { + CSFLogError(logTag, "%s Unable to initialize VoEProcessing", __FUNCTION__); + return kMediaConduitSessionNotInited; + } + if(!(mPtrVoEXmedia = VoEExternalMedia::GetInterface(mVoiceEngine))) + { + CSFLogError(logTag, "%s Unable to initialize VoEExternalMedia", __FUNCTION__); + return kMediaConduitSessionNotInited; + } + if(!(mPtrVoERTP_RTCP = VoERTP_RTCP::GetInterface(mVoiceEngine))) + { + CSFLogError(logTag, "%s Unable to initialize VoERTP_RTCP", __FUNCTION__); + return kMediaConduitSessionNotInited; + } + + if(!(mPtrVoEVideoSync = VoEVideoSync::GetInterface(mVoiceEngine))) + { + CSFLogError(logTag, "%s Unable to initialize VoEVideoSync", __FUNCTION__); + return kMediaConduitSessionNotInited; + } + if (!(mPtrRTP = webrtc::VoERTP_RTCP::GetInterface(mVoiceEngine))) + { + CSFLogError(logTag, "%s Unable to get audio RTP/RTCP interface ", + __FUNCTION__); + return kMediaConduitSessionNotInited; + } + + // init the engine with our audio device layer + if(mPtrVoEBase->Init() == -1) + { + CSFLogError(logTag, "%s VoiceEngine Base Not Initialized", __FUNCTION__); + return kMediaConduitSessionNotInited; + } + + if( (mChannel = mPtrVoEBase->CreateChannel()) == -1) + { + CSFLogError(logTag, "%s VoiceEngine Channel creation failed",__FUNCTION__); + return kMediaConduitChannelError; + } + + CSFLogDebug(logTag, "%s Channel Created %d ",__FUNCTION__, mChannel); + + if(mPtrVoENetwork->RegisterExternalTransport(mChannel, 
*this) == -1) + { + CSFLogError(logTag, "%s VoiceEngine, External Transport Failed",__FUNCTION__); + return kMediaConduitTransportRegistrationFail; + } + + if(mPtrVoEXmedia->SetExternalRecordingStatus(true) == -1) + { + CSFLogError(logTag, "%s SetExternalRecordingStatus Failed %d",__FUNCTION__, + mPtrVoEBase->LastError()); + return kMediaConduitExternalPlayoutError; + } + + if(mPtrVoEXmedia->SetExternalPlayoutStatus(true) == -1) + { + CSFLogError(logTag, "%s SetExternalPlayoutStatus Failed %d ",__FUNCTION__, + mPtrVoEBase->LastError()); + return kMediaConduitExternalRecordingError; + } + + CSFLogDebug(logTag , "%s AudioSessionConduit Initialization Done (%p)",__FUNCTION__, this); + return kMediaConduitNoError; +} + +// AudioSessionConduit Implementation +MediaConduitErrorCode +WebrtcAudioConduit::SetTransmitterTransport(RefPtr<TransportInterface> aTransport) +{ + CSFLogDebug(logTag, "%s ", __FUNCTION__); + + ReentrantMonitorAutoEnter enter(mTransportMonitor); + // set the transport + mTransmitterTransport = aTransport; + return kMediaConduitNoError; +} + +MediaConduitErrorCode +WebrtcAudioConduit::SetReceiverTransport(RefPtr<TransportInterface> aTransport) +{ + CSFLogDebug(logTag, "%s ", __FUNCTION__); + + ReentrantMonitorAutoEnter enter(mTransportMonitor); + // set the transport + mReceiverTransport = aTransport; + return kMediaConduitNoError; +} + +MediaConduitErrorCode +WebrtcAudioConduit::ConfigureSendMediaCodec(const AudioCodecConfig* codecConfig) +{ + CSFLogDebug(logTag, "%s ", __FUNCTION__); + MediaConduitErrorCode condError = kMediaConduitNoError; + int error = 0;//webrtc engine errors + webrtc::CodecInst cinst; + + { + //validate codec param + if((condError = ValidateCodecConfig(codecConfig, true)) != kMediaConduitNoError) + { + return condError; + } + } + + condError = StopTransmitting(); + if (condError != kMediaConduitNoError) { + return condError; + } + + if(!CodecConfigToWebRTCCodec(codecConfig,cinst)) + { + CSFLogError(logTag,"%s CodecConfig to WebRTC Codec Failed ",__FUNCTION__); + return kMediaConduitMalformedArgument; + } + + if(mPtrVoECodec->SetSendCodec(mChannel, cinst) == -1) + { + error = mPtrVoEBase->LastError(); + CSFLogError(logTag, "%s SetSendCodec - Invalid Codec %d ",__FUNCTION__, + error); + + if(error == VE_CANNOT_SET_SEND_CODEC || error == VE_CODEC_ERROR) + { + CSFLogError(logTag, "%s Invalid Send Codec", __FUNCTION__); + return kMediaConduitInvalidSendCodec; + } + CSFLogError(logTag, "%s SetSendCodec Failed %d ", __FUNCTION__, + mPtrVoEBase->LastError()); + return kMediaConduitUnknownError; + } + + // This must be called after SetSendCodec + if (mPtrVoECodec->SetFECStatus(mChannel, codecConfig->mFECEnabled) == -1) { + CSFLogError(logTag, "%s SetFECStatus Failed %d ", __FUNCTION__, + mPtrVoEBase->LastError()); + return kMediaConduitFECStatusError; + } + + mDtmfEnabled = codecConfig->mDtmfEnabled; + + if (codecConfig->mName == "opus" && codecConfig->mMaxPlaybackRate) { + if (mPtrVoECodec->SetOpusMaxPlaybackRate( + mChannel, + codecConfig->mMaxPlaybackRate) == -1) { + CSFLogError(logTag, "%s SetOpusMaxPlaybackRate Failed %d ", __FUNCTION__, + mPtrVoEBase->LastError()); + return kMediaConduitUnknownError; + } + } + +#if !defined(MOZILLA_EXTERNAL_LINKAGE) + // TEMPORARY - see bug 694814 comment 2 + nsresult rv; + nsCOMPtr<nsIPrefService> prefs = do_GetService("@mozilla.org/preferences-service;1", &rv); + if (NS_SUCCEEDED(rv)) { + nsCOMPtr<nsIPrefBranch> branch = do_QueryInterface(prefs); + + if (branch) { + branch->GetIntPref("media.peerconnection.capture_delay", 
&mCaptureDelay); + } + } +#endif + + condError = StartTransmitting(); + if (condError != kMediaConduitNoError) { + return condError; + } + + { + MutexAutoLock lock(mCodecMutex); + + //Copy the applied config for future reference. + mCurSendCodecConfig = new AudioCodecConfig(codecConfig->mType, + codecConfig->mName, + codecConfig->mFreq, + codecConfig->mPacSize, + codecConfig->mChannels, + codecConfig->mRate, + codecConfig->mFECEnabled); + } + return kMediaConduitNoError; +} + +MediaConduitErrorCode +WebrtcAudioConduit::ConfigureRecvMediaCodecs( + const std::vector<AudioCodecConfig*>& codecConfigList) +{ + CSFLogDebug(logTag, "%s ", __FUNCTION__); + MediaConduitErrorCode condError = kMediaConduitNoError; + int error = 0; //webrtc engine errors + bool success = false; + + // Are we receiving already? If so, stop receiving and playout + // since we can't apply new recv codec when the engine is playing. + condError = StopReceiving(); + if (condError != kMediaConduitNoError) { + return condError; + } + + if(codecConfigList.empty()) + { + CSFLogError(logTag, "%s Zero number of codecs to configure", __FUNCTION__); + return kMediaConduitMalformedArgument; + } + + // Try Applying the codecs in the list. + // We succeed if at least one codec was applied and reception was + // started successfully. + for(std::vector<AudioCodecConfig*>::size_type i=0 ;i<codecConfigList.size();i++) + { + //if the codec param is invalid or diplicate, return error + if((condError = ValidateCodecConfig(codecConfigList[i],false)) != kMediaConduitNoError) + { + return condError; + } + + webrtc::CodecInst cinst; + if(!CodecConfigToWebRTCCodec(codecConfigList[i],cinst)) + { + CSFLogError(logTag,"%s CodecConfig to WebRTC Codec Failed ",__FUNCTION__); + continue; + } + + if(mPtrVoECodec->SetRecPayloadType(mChannel,cinst) == -1) + { + error = mPtrVoEBase->LastError(); + CSFLogError(logTag, "%s SetRecvCodec Failed %d ",__FUNCTION__, error); + continue; + } else { + CSFLogDebug(logTag, "%s Successfully Set RecvCodec %s", __FUNCTION__, + codecConfigList[i]->mName.c_str()); + //copy this to local database + if(CopyCodecToDB(codecConfigList[i])) + { + success = true; + } else { + CSFLogError(logTag,"%s Unable to updated Codec Database", __FUNCTION__); + return kMediaConduitUnknownError; + } + + } + + } //end for + + if(!success) + { + CSFLogError(logTag, "%s Setting Receive Codec Failed ", __FUNCTION__); + return kMediaConduitInvalidReceiveCodec; + } + + //If we are here, atleast one codec should have been set + condError = StartReceiving(); + if (condError != kMediaConduitNoError) { + return condError; + } + + DumpCodecDB(); + return kMediaConduitNoError; +} +MediaConduitErrorCode +WebrtcAudioConduit::EnableAudioLevelExtension(bool enabled, uint8_t id) +{ + CSFLogDebug(logTag, "%s %d %d ", __FUNCTION__, enabled, id); + + if (mPtrVoERTP_RTCP->SetSendAudioLevelIndicationStatus(mChannel, enabled, id) == -1) + { + CSFLogError(logTag, "%s SetSendAudioLevelIndicationStatus Failed", __FUNCTION__); + return kMediaConduitUnknownError; + } + + return kMediaConduitNoError; +} + +MediaConduitErrorCode +WebrtcAudioConduit::SendAudioFrame(const int16_t audio_data[], + int32_t lengthSamples, + int32_t samplingFreqHz, + int32_t capture_delay) +{ + CSFLogDebug(logTag, "%s ", __FUNCTION__); + // Following checks need to be performed + // 1. Non null audio buffer pointer, + // 2. invalid sampling frequency - less than 0 or unsupported ones + // 3. Appropriate Sample Length for 10 ms audio-frame. 
This represents + // block size the VoiceEngine feeds into encoder for passed in audio-frame + // Ex: for 16000 sampling rate , valid block-length is 160 + // Similarly for 32000 sampling rate, valid block length is 320 + // We do the check by the verify modular operator below to be zero + + if(!audio_data || (lengthSamples <= 0) || + (IsSamplingFreqSupported(samplingFreqHz) == false) || + ((lengthSamples % (samplingFreqHz / 100) != 0)) ) + { + CSFLogError(logTag, "%s Invalid Parameters ",__FUNCTION__); + MOZ_ASSERT(PR_FALSE); + return kMediaConduitMalformedArgument; + } + + //validate capture time + if(capture_delay < 0 ) + { + CSFLogError(logTag,"%s Invalid Capture Delay ", __FUNCTION__); + MOZ_ASSERT(PR_FALSE); + return kMediaConduitMalformedArgument; + } + + // if transmission is not started .. conduit cannot insert frames + if(!mEngineTransmitting) + { + CSFLogError(logTag, "%s Engine not transmitting ", __FUNCTION__); + return kMediaConduitSessionNotInited; + } + +#if !defined(MOZILLA_EXTERNAL_LINKAGE) + if (MOZ_LOG_TEST(GetLatencyLog(), LogLevel::Debug)) { + struct Processing insert = { TimeStamp::Now(), 0 }; + mProcessing.AppendElement(insert); + } +#endif + + capture_delay = mCaptureDelay; + //Insert the samples + if(mPtrVoEXmedia->ExternalRecordingInsertData(audio_data, + lengthSamples, + samplingFreqHz, + capture_delay) == -1) + { + int error = mPtrVoEBase->LastError(); + CSFLogError(logTag, "%s Inserting audio data Failed %d", __FUNCTION__, error); + if(error == VE_RUNTIME_REC_ERROR) + { + return kMediaConduitRecordingError; + } + return kMediaConduitUnknownError; + } + // we should be good here + return kMediaConduitNoError; +} + +MediaConduitErrorCode +WebrtcAudioConduit::GetAudioFrame(int16_t speechData[], + int32_t samplingFreqHz, + int32_t capture_delay, + int& lengthSamples) +{ + + CSFLogDebug(logTag, "%s ", __FUNCTION__); + unsigned int numSamples = 0; + + //validate params + if(!speechData ) + { + CSFLogError(logTag,"%s Null Audio Buffer Pointer", __FUNCTION__); + MOZ_ASSERT(PR_FALSE); + return kMediaConduitMalformedArgument; + } + + // Validate sample length + if((numSamples = GetNum10msSamplesForFrequency(samplingFreqHz)) == 0 ) + { + CSFLogError(logTag,"%s Invalid Sampling Frequency ", __FUNCTION__); + MOZ_ASSERT(PR_FALSE); + return kMediaConduitMalformedArgument; + } + + //validate capture time + if(capture_delay < 0 ) + { + CSFLogError(logTag,"%s Invalid Capture Delay ", __FUNCTION__); + MOZ_ASSERT(PR_FALSE); + return kMediaConduitMalformedArgument; + } + + //Conduit should have reception enabled before we ask for decoded + // samples + if(!mEngineReceiving) + { + CSFLogError(logTag, "%s Engine not Receiving ", __FUNCTION__); + return kMediaConduitSessionNotInited; + } + + + lengthSamples = 0; //output paramter + + if(mPtrVoEXmedia->ExternalPlayoutGetData( speechData, + samplingFreqHz, + capture_delay, + lengthSamples) == -1) + { + int error = mPtrVoEBase->LastError(); + CSFLogError(logTag, "%s Getting audio data Failed %d", __FUNCTION__, error); + if(error == VE_RUNTIME_PLAY_ERROR) + { + return kMediaConduitPlayoutError; + } + return kMediaConduitUnknownError; + } + + // Not #ifdef DEBUG or on a log module so we can use it for about:webrtc/etc + mSamples += lengthSamples; + if (mSamples >= mLastSyncLog + samplingFreqHz) { + int jitter_buffer_delay_ms; + int playout_buffer_delay_ms; + int avsync_offset_ms; + if (GetAVStats(&jitter_buffer_delay_ms, + &playout_buffer_delay_ms, + &avsync_offset_ms)) { +#if !defined(MOZILLA_EXTERNAL_LINKAGE) + if (avsync_offset_ms < 0) 
{ + Telemetry::Accumulate(Telemetry::WEBRTC_AVSYNC_WHEN_VIDEO_LAGS_AUDIO_MS, + -avsync_offset_ms); + } else { + Telemetry::Accumulate(Telemetry::WEBRTC_AVSYNC_WHEN_AUDIO_LAGS_VIDEO_MS, + avsync_offset_ms); + } +#endif + CSFLogError(logTag, + "A/V sync: sync delta: %dms, audio jitter delay %dms, playout delay %dms", + avsync_offset_ms, jitter_buffer_delay_ms, playout_buffer_delay_ms); + } else { + CSFLogError(logTag, "A/V sync: GetAVStats failed"); + } + mLastSyncLog = mSamples; + } + +#if !defined(MOZILLA_EXTERNAL_LINKAGE) + if (MOZ_LOG_TEST(GetLatencyLog(), LogLevel::Debug)) { + if (mProcessing.Length() > 0) { + unsigned int now; + mPtrVoEVideoSync->GetPlayoutTimestamp(mChannel, now); + if (static_cast<uint32_t>(now) != mLastTimestamp) { + mLastTimestamp = static_cast<uint32_t>(now); + // Find the block that includes this timestamp in the network input + while (mProcessing.Length() > 0) { + // FIX! assumes 20ms @ 48000Hz + // FIX handle wrap-around + if (mProcessing[0].mRTPTimeStamp + 20*(48000/1000) >= now) { + TimeDuration t = TimeStamp::Now() - mProcessing[0].mTimeStamp; + // Wrap-around? + int64_t delta = t.ToMilliseconds() + (now - mProcessing[0].mRTPTimeStamp)/(48000/1000); + LogTime(AsyncLatencyLogger::AudioRecvRTP, ((uint64_t) this), delta); + break; + } + mProcessing.RemoveElementAt(0); + } + } + } + } +#endif + CSFLogDebug(logTag,"%s GetAudioFrame:Got samples: length %d ",__FUNCTION__, + lengthSamples); + return kMediaConduitNoError; +} + +// Transport Layer Callbacks +MediaConduitErrorCode +WebrtcAudioConduit::ReceivedRTPPacket(const void *data, int len) +{ + CSFLogDebug(logTag, "%s : channel %d", __FUNCTION__, mChannel); + + if(mEngineReceiving) + { +#if !defined(MOZILLA_EXTERNAL_LINKAGE) + if (MOZ_LOG_TEST(GetLatencyLog(), LogLevel::Debug)) { + // timestamp is at 32 bits in ([1]) + struct Processing insert = { TimeStamp::Now(), + ntohl(static_cast<const uint32_t *>(data)[1]) }; + mProcessing.AppendElement(insert); + } +#endif + + // XXX we need to get passed the time the packet was received + if(mPtrVoENetwork->ReceivedRTPPacket(mChannel, data, len) == -1) + { + int error = mPtrVoEBase->LastError(); + CSFLogError(logTag, "%s RTP Processing Error %d", __FUNCTION__, error); + if(error == VE_RTP_RTCP_MODULE_ERROR) + { + return kMediaConduitRTPRTCPModuleError; + } + return kMediaConduitUnknownError; + } + } else { + CSFLogError(logTag, "Error: %s when not receiving", __FUNCTION__); + return kMediaConduitSessionNotInited; + } + + return kMediaConduitNoError; +} + +MediaConduitErrorCode +WebrtcAudioConduit::ReceivedRTCPPacket(const void *data, int len) +{ + CSFLogDebug(logTag, "%s : channel %d",__FUNCTION__, mChannel); + + if(mPtrVoENetwork->ReceivedRTCPPacket(mChannel, data, len) == -1) + { + int error = mPtrVoEBase->LastError(); + CSFLogError(logTag, "%s RTCP Processing Error %d", __FUNCTION__, error); + if(error == VE_RTP_RTCP_MODULE_ERROR) + { + return kMediaConduitRTPRTCPModuleError; + } + return kMediaConduitUnknownError; + } + return kMediaConduitNoError; +} + +MediaConduitErrorCode +WebrtcAudioConduit::StopTransmitting() +{ + if(mEngineTransmitting) + { + CSFLogDebug(logTag, "%s Engine Already Sending. 
Attemping to Stop ", __FUNCTION__); + if(mPtrVoEBase->StopSend(mChannel) == -1) + { + CSFLogError(logTag, "%s StopSend() Failed %d ", __FUNCTION__, + mPtrVoEBase->LastError()); + return kMediaConduitUnknownError; + } + mEngineTransmitting = false; + } + + return kMediaConduitNoError; +} + +MediaConduitErrorCode +WebrtcAudioConduit::StartTransmitting() +{ + if (!mEngineTransmitting) { + //Let's Send Transport State-machine on the Engine + if(mPtrVoEBase->StartSend(mChannel) == -1) + { + int error = mPtrVoEBase->LastError(); + CSFLogError(logTag, "%s StartSend failed %d", __FUNCTION__, error); + return kMediaConduitUnknownError; + } + mEngineTransmitting = true; + } + + return kMediaConduitNoError; +} + +MediaConduitErrorCode +WebrtcAudioConduit::StopReceiving() +{ + if(mEngineReceiving) + { + CSFLogDebug(logTag, "%s Engine Already Receiving. Attemping to Stop ", __FUNCTION__); + // AudioEngine doesn't fail fatally on stopping reception. Ref:voe_errors.h. + // hence we need not be strict in failing here on errors + mPtrVoEBase->StopReceive(mChannel); + CSFLogDebug(logTag, "%s Attemping to Stop playout ", __FUNCTION__); + if(mPtrVoEBase->StopPlayout(mChannel) == -1) + { + if( mPtrVoEBase->LastError() == VE_CANNOT_STOP_PLAYOUT) + { + CSFLogDebug(logTag, "%s Stop-Playout Failed %d", __FUNCTION__, mPtrVoEBase->LastError()); + return kMediaConduitPlayoutError; + } + } + mEngineReceiving = false; + } + + return kMediaConduitNoError; +} + +MediaConduitErrorCode +WebrtcAudioConduit::StartReceiving() +{ + if (!mEngineReceiving) { + if(mPtrVoEBase->StartReceive(mChannel) == -1) + { + int error = mPtrVoEBase->LastError(); + CSFLogError(logTag , "%s StartReceive Failed %d ",__FUNCTION__, error); + if(error == VE_RECV_SOCKET_ERROR) + { + return kMediaConduitSocketError; + } + return kMediaConduitUnknownError; + } + + + if(mPtrVoEBase->StartPlayout(mChannel) == -1) + { + CSFLogError(logTag, "%s Starting playout Failed", __FUNCTION__); + return kMediaConduitPlayoutError; + } + mEngineReceiving = true; + } + + return kMediaConduitNoError; +} + +//WebRTC::RTP Callback Implementation +// Called on AudioGUM or MSG thread +int WebrtcAudioConduit::SendPacket(int channel, const void* data, size_t len) +{ + CSFLogDebug(logTag, "%s : channel %d", __FUNCTION__, channel); + +#if !defined(MOZILLA_EXTERNAL_LINKAGE) + if (MOZ_LOG_TEST(GetLatencyLog(), LogLevel::Debug)) { + if (mProcessing.Length() > 0) { + TimeStamp started = mProcessing[0].mTimeStamp; + mProcessing.RemoveElementAt(0); + mProcessing.RemoveElementAt(0); // 20ms packetization! Could automate this by watching sizes + TimeDuration t = TimeStamp::Now() - started; + int64_t delta = t.ToMilliseconds(); + LogTime(AsyncLatencyLogger::AudioSendRTP, ((uint64_t) this), delta); + } + } +#endif + ReentrantMonitorAutoEnter enter(mTransportMonitor); + if(mTransmitterTransport && + (mTransmitterTransport->SendRtpPacket(data, len) == NS_OK)) + { + CSFLogDebug(logTag, "%s Sent RTP Packet ", __FUNCTION__); + return len; + } else { + CSFLogError(logTag, "%s RTP Packet Send Failed ", __FUNCTION__); + return -1; + } +} + +// Called on WebRTC Process thread and perhaps others +int WebrtcAudioConduit::SendRTCPPacket(int channel, const void* data, size_t len) +{ + CSFLogDebug(logTag, "%s : channel %d , len %lu, first rtcp = %u ", + __FUNCTION__, + channel, + (unsigned long) len, + static_cast<unsigned>(((uint8_t *) data)[1])); + + // We come here if we have only one pipeline/conduit setup, + // such as for unidirectional streams. 
+ // We also end up here if we are receiving + ReentrantMonitorAutoEnter enter(mTransportMonitor); + if(mReceiverTransport && + mReceiverTransport->SendRtcpPacket(data, len) == NS_OK) + { + // Might be a sender report, might be a receiver report, we don't know. + CSFLogDebug(logTag, "%s Sent RTCP Packet ", __FUNCTION__); + return len; + } else if(mTransmitterTransport && + (mTransmitterTransport->SendRtcpPacket(data, len) == NS_OK)) { + CSFLogDebug(logTag, "%s Sent RTCP Packet (sender report) ", __FUNCTION__); + return len; + } else { + CSFLogError(logTag, "%s RTCP Packet Send Failed ", __FUNCTION__); + return -1; + } +} + +/** + * Converts between CodecConfig to WebRTC Codec Structure. + */ + +bool +WebrtcAudioConduit::CodecConfigToWebRTCCodec(const AudioCodecConfig* codecInfo, + webrtc::CodecInst& cinst) + { + const unsigned int plNameLength = codecInfo->mName.length(); + memset(&cinst, 0, sizeof(webrtc::CodecInst)); + if(sizeof(cinst.plname) < plNameLength+1) + { + CSFLogError(logTag, "%s Payload name buffer capacity mismatch ", + __FUNCTION__); + return false; + } + memcpy(cinst.plname, codecInfo->mName.c_str(), plNameLength); + cinst.plname[plNameLength]='\0'; + cinst.pltype = codecInfo->mType; + cinst.rate = codecInfo->mRate; + cinst.pacsize = codecInfo->mPacSize; + cinst.plfreq = codecInfo->mFreq; + if (codecInfo->mName == "G722") { + // Compensate for G.722 spec error in RFC 1890 + cinst.plfreq = 16000; + } + cinst.channels = codecInfo->mChannels; + return true; + } + +/** + * Supported Sampling Frequncies. + */ +bool +WebrtcAudioConduit::IsSamplingFreqSupported(int freq) const +{ + if(GetNum10msSamplesForFrequency(freq)) + { + return true; + } else { + return false; + } +} + +/* Return block-length of 10 ms audio frame in number of samples */ +unsigned int +WebrtcAudioConduit::GetNum10msSamplesForFrequency(int samplingFreqHz) const +{ + switch(samplingFreqHz) + { + case 16000: return 160; //160 samples + case 32000: return 320; //320 samples + case 44100: return 441; //441 samples + case 48000: return 480; //480 samples + default: return 0; // invalid or unsupported + } +} + +//Copy the codec passed into Conduit's database +bool +WebrtcAudioConduit::CopyCodecToDB(const AudioCodecConfig* codecInfo) +{ + + AudioCodecConfig* cdcConfig = new AudioCodecConfig(codecInfo->mType, + codecInfo->mName, + codecInfo->mFreq, + codecInfo->mPacSize, + codecInfo->mChannels, + codecInfo->mRate, + codecInfo->mFECEnabled); + mRecvCodecList.push_back(cdcConfig); + return true; +} + +/** + * Checks if 2 codec structs are same + */ +bool +WebrtcAudioConduit::CheckCodecsForMatch(const AudioCodecConfig* curCodecConfig, + const AudioCodecConfig* codecInfo) const +{ + if(!curCodecConfig) + { + return false; + } + + if(curCodecConfig->mType == codecInfo->mType && + (curCodecConfig->mName.compare(codecInfo->mName) == 0) && + curCodecConfig->mFreq == codecInfo->mFreq && + curCodecConfig->mPacSize == codecInfo->mPacSize && + curCodecConfig->mChannels == codecInfo->mChannels && + curCodecConfig->mRate == codecInfo->mRate) + { + return true; + } + + return false; +} + +/** + * Checks if the codec is already in Conduit's database + */ +bool +WebrtcAudioConduit::CheckCodecForMatch(const AudioCodecConfig* codecInfo) const +{ + //the db should have atleast one codec + for(std::vector<AudioCodecConfig*>::size_type i=0;i < mRecvCodecList.size();i++) + { + if(CheckCodecsForMatch(mRecvCodecList[i],codecInfo)) + { + //match + return true; + } + } + //no match or empty local db + return false; +} + + +/** + * Perform 
validation on the codecConfig to be applied. + * Verifies if the codec is already applied. + */ +MediaConduitErrorCode +WebrtcAudioConduit::ValidateCodecConfig(const AudioCodecConfig* codecInfo, + bool send) +{ + bool codecAppliedAlready = false; + + if(!codecInfo) + { + CSFLogError(logTag, "%s Null CodecConfig ", __FUNCTION__); + return kMediaConduitMalformedArgument; + } + + if((codecInfo->mName.empty()) || + (codecInfo->mName.length() >= CODEC_PLNAME_SIZE)) + { + CSFLogError(logTag, "%s Invalid Payload Name Length ", __FUNCTION__); + return kMediaConduitMalformedArgument; + } + + //Only mono or stereo channels supported + if( (codecInfo->mChannels != 1) && (codecInfo->mChannels != 2)) + { + CSFLogError(logTag, "%s Channel Unsupported ", __FUNCTION__); + return kMediaConduitMalformedArgument; + } + + //check if we have the same codec already applied + if(send) + { + MutexAutoLock lock(mCodecMutex); + + codecAppliedAlready = CheckCodecsForMatch(mCurSendCodecConfig,codecInfo); + } else { + codecAppliedAlready = CheckCodecForMatch(codecInfo); + } + + if(codecAppliedAlready) + { + CSFLogDebug(logTag, "%s Codec %s Already Applied ", __FUNCTION__, codecInfo->mName.c_str()); + } + return kMediaConduitNoError; +} + +void +WebrtcAudioConduit::DumpCodecDB() const + { + for(std::vector<AudioCodecConfig*>::size_type i=0;i < mRecvCodecList.size();i++) + { + CSFLogDebug(logTag,"Payload Name: %s", mRecvCodecList[i]->mName.c_str()); + CSFLogDebug(logTag,"Payload Type: %d", mRecvCodecList[i]->mType); + CSFLogDebug(logTag,"Payload Frequency: %d", mRecvCodecList[i]->mFreq); + CSFLogDebug(logTag,"Payload PacketSize: %d", mRecvCodecList[i]->mPacSize); + CSFLogDebug(logTag,"Payload Channels: %d", mRecvCodecList[i]->mChannels); + CSFLogDebug(logTag,"Payload Sampling Rate: %d", mRecvCodecList[i]->mRate); + } + } +}// end namespace diff --git a/media/webrtc/signaling/src/media-conduit/AudioConduit.h b/media/webrtc/signaling/src/media-conduit/AudioConduit.h new file mode 100755 index 000000000..228736dcc --- /dev/null +++ b/media/webrtc/signaling/src/media-conduit/AudioConduit.h @@ -0,0 +1,304 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + + +#ifndef AUDIO_SESSION_H_ +#define AUDIO_SESSION_H_ + +#include "mozilla/Attributes.h" +#include "mozilla/TimeStamp.h" +#include "nsTArray.h" + +#include "MediaConduitInterface.h" +#include "MediaEngineWrapper.h" + +// Audio Engine Includes +#include "webrtc/common_types.h" +#include "webrtc/voice_engine/include/voe_base.h" +#include "webrtc/voice_engine/include/voe_volume_control.h" +#include "webrtc/voice_engine/include/voe_codec.h" +#include "webrtc/voice_engine/include/voe_file.h" +#include "webrtc/voice_engine/include/voe_network.h" +#include "webrtc/voice_engine/include/voe_external_media.h" +#include "webrtc/voice_engine/include/voe_audio_processing.h" +#include "webrtc/voice_engine/include/voe_video_sync.h" +#include "webrtc/voice_engine/include/voe_rtp_rtcp.h" +//Some WebRTC types for short notations + using webrtc::VoEBase; + using webrtc::VoENetwork; + using webrtc::VoECodec; + using webrtc::VoEExternalMedia; + using webrtc::VoEAudioProcessing; + using webrtc::VoEVideoSync; + using webrtc::VoERTP_RTCP; +/** This file hosts several structures identifying different aspects + * of a RTP Session. 
+ */
+namespace mozilla {
+// Helper function
+
+DOMHighResTimeStamp
+NTPtoDOMHighResTimeStamp(uint32_t ntpHigh, uint32_t ntpLow);
+
+/**
+ * Concrete class for Audio session. Hooks up
+ *  - media-source and target to external transport
+ */
+class WebrtcAudioConduit: public AudioSessionConduit
+                        , public webrtc::Transport
+{
+public:
+  //VoiceEngine defined constant for Payload Name Size.
+  static const unsigned int CODEC_PLNAME_SIZE;
+
+  /**
+   * APIs used by the registered external transport to this Conduit to
+   * feed in received RTP Frames to the VoiceEngine for decoding
+   */
+  virtual MediaConduitErrorCode ReceivedRTPPacket(const void *data, int len) override;
+
+  /**
+   * APIs used by the registered external transport to this Conduit to
+   * feed in received RTCP Frames to the VoiceEngine for decoding
+   */
+  virtual MediaConduitErrorCode ReceivedRTCPPacket(const void *data, int len) override;
+
+  virtual MediaConduitErrorCode StopTransmitting() override;
+  virtual MediaConduitErrorCode StartTransmitting() override;
+  virtual MediaConduitErrorCode StopReceiving() override;
+  virtual MediaConduitErrorCode StartReceiving() override;
+
+  /**
+   * Function to configure the send codec for the audio session
+   * @param codecConfig: CodecConfiguration
+   * @result: On Success, the audio engine is configured with the passed-in codec for send
+   *          On failure, audio engine transmit functionality is disabled.
+   * NOTE: This API can be invoked multiple times. Invoking this API may involve restarting
+   *       the transmission sub-system on the engine.
+   */
+  virtual MediaConduitErrorCode ConfigureSendMediaCodec(const AudioCodecConfig* codecConfig) override;
+
+  /**
+   * Function to configure the list of receive codecs for the audio session
+   * @param codecConfigList: list of CodecConfigurations
+   * @result: On Success, the audio engine is configured with the passed-in codecs for receive,
+   *          and playout is enabled.
+   *          On failure, audio engine receive functionality is disabled.
+   * NOTE: This API can be invoked multiple times. Invoking this API may involve restarting
+   *       the reception sub-system on the engine.
+   */
+  virtual MediaConduitErrorCode ConfigureRecvMediaCodecs(
+    const std::vector<AudioCodecConfig* >& codecConfigList) override;
+
+  /**
+   * Function to enable the audio level extension
+   * @param enabled: enable extension
+   */
+  virtual MediaConduitErrorCode EnableAudioLevelExtension(bool enabled, uint8_t id) override;
+
+  /**
+   * Register External Transport to this Conduit. RTP and RTCP frames from the VoiceEngine
+   * shall be passed to the registered transport for transporting externally.
+   */
+  virtual MediaConduitErrorCode SetTransmitterTransport(RefPtr<TransportInterface> aTransport) override;
+
+  virtual MediaConduitErrorCode SetReceiverTransport(RefPtr<TransportInterface> aTransport) override;
+
+  /**
+   * Function to deliver externally captured audio samples for encoding and transport
+   * @param speechData [in]: Pointer to array containing a frame of audio
+   * @param lengthSamples [in]: Length of the audio frame in samples, in multiples of 10 milliseconds
+   *                            Ex: Frame length is 160, 320, 441 for 16, 32, 44.1 kHz sampling rates
+   *                            respectively.
+   *                            speechData[] should be of lengthSamples in size;
+   *                            say, for a 16 kHz sampling rate, speechData[] should contain 160
+   *                            samples of 16-bits each for a 10 ms audio frame.
+   * @param samplingFreqHz [in]: Frequency/rate of the sampling in Hz ( 16000, 32000 ...)
+   * @param capture_delay [in]: Approx Delay from recording until it is delivered to VoiceEngine
+   *                            in milliseconds.
+   * NOTE: ConfigureSendMediaCodec() SHOULD be called before this function can be invoked.
+   *       This ensures the inserted audio-samples can be transmitted by the conduit.
+   */
+  virtual MediaConduitErrorCode SendAudioFrame(const int16_t speechData[],
+                                               int32_t lengthSamples,
+                                               int32_t samplingFreqHz,
+                                               int32_t capture_time) override;
+
+  /**
+   * Function to grab a decoded audio-sample from the media engine for rendering
+   * / playout of length 10 milliseconds.
+   *
+   * @param speechData [in]: Pointer to an array to which a 10 ms frame of audio will be copied
+   * @param samplingFreqHz [in]: Frequency of the sampling for playback in Hertz (16000, 32000,..)
+   * @param capture_delay [in]: Estimated Time between reading of the samples to rendering/playback
+   * @param lengthSamples [out]: Will contain the length of the audio frame in samples at return.
+   *                             Ex: A value of 160 implies 160 samples each of 16-bits was copied
+   *                             into speechData
+   * NOTE: This function should be invoked every 10 milliseconds for the best
+   *       performance.
+   * NOTE: ConfigureRecvMediaCodecs() SHOULD be called before this function can be invoked.
+   *       This ensures the decoded samples are ready for reading and playout is enabled.
+   */
+  virtual MediaConduitErrorCode GetAudioFrame(int16_t speechData[],
+                                              int32_t samplingFreqHz,
+                                              int32_t capture_delay,
+                                              int& lengthSamples) override;
+
+
+  /**
+   * Webrtc transport implementation to send and receive RTP packets.
+   * AudioConduit registers itself as ExternalTransport to the VoiceEngine
+   */
+  virtual int SendPacket(int channel, const void *data, size_t len) override;
+
+  /**
+   * Webrtc transport implementation to send and receive RTCP packets.
+   * AudioConduit registers itself as ExternalTransport to the VoiceEngine
+   */
+  virtual int SendRTCPPacket(int channel, const void *data, size_t len) override;
+
+
+  virtual uint64_t CodecPluginID() override { return 0; }
+
+  WebrtcAudioConduit():
+    mVoiceEngine(nullptr),
+    mTransportMonitor("WebrtcAudioConduit"),
+    mTransmitterTransport(nullptr),
+    mReceiverTransport(nullptr),
+    mEngineTransmitting(false),
+    mEngineReceiving(false),
+    mChannel(-1),
+    mDtmfEnabled(false),
+    mCodecMutex("AudioConduit codec db"),
+    mCaptureDelay(150),
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+    mLastTimestamp(0),
+#endif // MOZILLA_EXTERNAL_LINKAGE
+    mSamples(0),
+    mLastSyncLog(0)
+  {
+  }
+
+  virtual ~WebrtcAudioConduit();
+
+  MediaConduitErrorCode Init();
+
+  int GetChannel() { return mChannel; }
+  webrtc::VoiceEngine* GetVoiceEngine() { return mVoiceEngine; }
+  bool SetLocalSSRC(unsigned int ssrc) override;
+  bool GetLocalSSRC(unsigned int* ssrc) override;
+  bool GetRemoteSSRC(unsigned int* ssrc) override;
+  bool SetLocalCNAME(const char* cname) override;
+  bool GetVideoEncoderStats(double* framerateMean,
+                            double* framerateStdDev,
+                            double* bitrateMean,
+                            double* bitrateStdDev,
+                            uint32_t* droppedFrames) override
+  {
+    return false;
+  }
+  bool GetVideoDecoderStats(double* framerateMean,
+                            double* framerateStdDev,
+                            double* bitrateMean,
+                            double* bitrateStdDev,
+                            uint32_t* discardedPackets) override
+  {
+    return false;
+  }
+  bool GetAVStats(int32_t* jitterBufferDelayMs,
+                  int32_t* playoutBufferDelayMs,
+                  int32_t* avSyncOffsetMs) override;
+  bool GetRTPStats(unsigned int* jitterMs, unsigned int* cumulativeLost) override;
+  bool GetRTCPReceiverReport(DOMHighResTimeStamp* timestamp,
+                             uint32_t* jitterMs,
+                             uint32_t* packetsReceived,
+                             uint64_t* bytesReceived,
+                             uint32_t* cumulativeLost,
+                             int32_t* rttMs) override;
+  bool GetRTCPSenderReport(DOMHighResTimeStamp* timestamp,
+                           unsigned int* packetsSent,
+                           uint64_t* bytesSent) override;
+
+  bool SetDtmfPayloadType(unsigned char type) override;
+
+  bool InsertDTMFTone(int channel, int eventCode, bool outOfBand,
+                      int lengthMs, int attenuationDb) override;
+
+private:
+  WebrtcAudioConduit(const WebrtcAudioConduit& other) = delete;
+  void operator=(const WebrtcAudioConduit& other) = delete;
+
+  //Local database of currently applied receive codecs
+  typedef std::vector<AudioCodecConfig* > RecvCodecList;
+
+  //Function to convert between WebRTC and Conduit codec structures
+  bool CodecConfigToWebRTCCodec(const AudioCodecConfig* codecInfo,
+                                webrtc::CodecInst& cinst);
+
+  //Checks if a given sampling frequency is supported
+  bool IsSamplingFreqSupported(int freq) const;
+
+  //Generate the block size in sample length for a given sampling frequency
+  unsigned int GetNum10msSamplesForFrequency(int samplingFreqHz) const;
+
+  // Function to copy a codec structure to Conduit's database
+  bool CopyCodecToDB(const AudioCodecConfig* codecInfo);
+
+  // Functions to verify if the codec passed is already in
+  // the conduit's database
+  bool CheckCodecForMatch(const AudioCodecConfig* codecInfo) const;
+  bool CheckCodecsForMatch(const AudioCodecConfig* curCodecConfig,
+                           const AudioCodecConfig* codecInfo) const;
+  //Checks the codec to be applied
+  MediaConduitErrorCode ValidateCodecConfig(const AudioCodecConfig* codecInfo, bool send);
+
+  //Utility function to dump the recv codec database
+  void DumpCodecDB() const;
+
+  webrtc::VoiceEngine* mVoiceEngine;
+  mozilla::ReentrantMonitor mTransportMonitor;
+  RefPtr<TransportInterface> mTransmitterTransport;
+  RefPtr<TransportInterface> mReceiverTransport;
+  ScopedCustomReleasePtr<webrtc::VoENetwork> mPtrVoENetwork;
+  ScopedCustomReleasePtr<webrtc::VoEBase> mPtrVoEBase;
+  ScopedCustomReleasePtr<webrtc::VoECodec> mPtrVoECodec;
+  ScopedCustomReleasePtr<webrtc::VoEExternalMedia> mPtrVoEXmedia;
+  ScopedCustomReleasePtr<webrtc::VoEAudioProcessing> mPtrVoEProcessing;
+  ScopedCustomReleasePtr<webrtc::VoEVideoSync> mPtrVoEVideoSync;
+  ScopedCustomReleasePtr<webrtc::VoERTP_RTCP> mPtrVoERTP_RTCP;
+  ScopedCustomReleasePtr<webrtc::VoERTP_RTCP> mPtrRTP;
+  //engine states of our interest
+  mozilla::Atomic<bool> mEngineTransmitting; // If true => VoiceEngine Send-subsystem is up
+  mozilla::Atomic<bool> mEngineReceiving;    // If true => VoiceEngine Receive-subsystem is up
+                                             // and playout is enabled
+  // Keep track of each inserted RTP block and the time it was inserted
+  // so we can estimate the clock time for a specific TimeStamp coming out
+  // (for when we send data to MediaStreamTracks). Blocks are aged out as needed.
+ struct Processing { + TimeStamp mTimeStamp; + uint32_t mRTPTimeStamp; // RTP timestamps received + }; + AutoTArray<Processing,8> mProcessing; + + int mChannel; + bool mDtmfEnabled; + RecvCodecList mRecvCodecList; + + Mutex mCodecMutex; // protects mCurSendCodecConfig + nsAutoPtr<AudioCodecConfig> mCurSendCodecConfig; + + // Current "capture" delay (really output plus input delay) + int32_t mCaptureDelay; + +#if !defined(MOZILLA_EXTERNAL_LINKAGE) + uint32_t mLastTimestamp; +#endif // MOZILLA_INTERNAL_API + + uint32_t mSamples; + uint32_t mLastSyncLog; +}; + +} // end namespace + +#endif diff --git a/media/webrtc/signaling/src/media-conduit/CodecConfig.h b/media/webrtc/signaling/src/media-conduit/CodecConfig.h new file mode 100755 index 000000000..308c97948 --- /dev/null +++ b/media/webrtc/signaling/src/media-conduit/CodecConfig.h @@ -0,0 +1,166 @@ + +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef CODEC_CONFIG_H_ +#define CODEC_CONFIG_H_ + +#include <string> +#include <vector> + +#include "signaling/src/common/EncodingConstraints.h" + +namespace mozilla { + +/** + * Minimalistic Audio Codec Config Params + */ +struct AudioCodecConfig +{ + /* + * The data-types for these properties mimic the + * corresponding webrtc::CodecInst data-types. + */ + int mType; + std::string mName; + int mFreq; + int mPacSize; + int mChannels; + int mRate; + + bool mFECEnabled; + bool mDtmfEnabled; + + // OPUS-specific + int mMaxPlaybackRate; + + /* Default constructor is not provided since as a consumer, we + * can't decide the default configuration for the codec + */ + explicit AudioCodecConfig(int type, std::string name, + int freq, int pacSize, + int channels, int rate, bool FECEnabled) + : mType(type), + mName(name), + mFreq(freq), + mPacSize(pacSize), + mChannels(channels), + mRate(rate), + mFECEnabled(FECEnabled), + mDtmfEnabled(false), + mMaxPlaybackRate(0) + { + } +}; + +/* + * Minimalistic video codec configuration + * More to be added later depending on the use-case + */ + +#define MAX_SPROP_LEN 128 + +// used for holding SDP negotiation results +struct VideoCodecConfigH264 +{ + char sprop_parameter_sets[MAX_SPROP_LEN]; + int packetization_mode; + int profile_level_id; + int tias_bw; +}; + + +// class so the std::strings can get freed more easily/reliably +class VideoCodecConfig +{ +public: + /* + * The data-types for these properties mimic the + * corresponding webrtc::VideoCodec data-types. + */ + int mType; // payload type + std::string mName; + + std::vector<std::string> mAckFbTypes; + std::vector<std::string> mNackFbTypes; + std::vector<std::string> mCcmFbTypes; + // Don't pass mOtherFbTypes from JsepVideoCodecDescription because we'd have + // to drag SdpRtcpFbAttributeList::Feedback along too. 
+ bool mRembFbSet; + bool mFECFbSet; + + EncodingConstraints mEncodingConstraints; + struct SimulcastEncoding { + std::string rid; + EncodingConstraints constraints; + }; + std::vector<SimulcastEncoding> mSimulcastEncodings; + std::string mSpropParameterSets; + uint8_t mProfile; + uint8_t mConstraints; + uint8_t mLevel; + uint8_t mPacketizationMode; + // TODO: add external negotiated SPS/PPS + + VideoCodecConfig(int type, + std::string name, + const EncodingConstraints& constraints, + const struct VideoCodecConfigH264 *h264 = nullptr) : + mType(type), + mName(name), + mFECFbSet(false), + mEncodingConstraints(constraints), + mProfile(0x42), + mConstraints(0xE0), + mLevel(0x0C), + mPacketizationMode(1) + { + if (h264) { + mProfile = (h264->profile_level_id & 0x00FF0000) >> 16; + mConstraints = (h264->profile_level_id & 0x0000FF00) >> 8; + mLevel = (h264->profile_level_id & 0x000000FF); + mPacketizationMode = h264->packetization_mode; + mSpropParameterSets = h264->sprop_parameter_sets; + } + } + + // Nothing seems to use this right now. Do we intend to support this + // someday? + bool RtcpFbAckIsSet(const std::string& type) const + { + for (auto i = mAckFbTypes.begin(); i != mAckFbTypes.end(); ++i) { + if (*i == type) { + return true; + } + } + return false; + } + + bool RtcpFbNackIsSet(const std::string& type) const + { + for (auto i = mNackFbTypes.begin(); i != mNackFbTypes.end(); ++i) { + if (*i == type) { + return true; + } + } + return false; + } + + bool RtcpFbCcmIsSet(const std::string& type) const + { + for (auto i = mCcmFbTypes.begin(); i != mCcmFbTypes.end(); ++i) { + if (*i == type) { + return true; + } + } + return false; + } + + bool RtcpFbRembIsSet() const { return mRembFbSet; } + + bool RtcpFbFECIsSet() const { return mFECFbSet; } + +}; +} +#endif diff --git a/media/webrtc/signaling/src/media-conduit/CodecStatistics.cpp b/media/webrtc/signaling/src/media-conduit/CodecStatistics.cpp new file mode 100644 index 000000000..eb03c0bf8 --- /dev/null +++ b/media/webrtc/signaling/src/media-conduit/CodecStatistics.cpp @@ -0,0 +1,183 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "CodecStatistics.h" + +#include "CSFLog.h" +#include "mozilla/Telemetry.h" + +using namespace mozilla; +using namespace webrtc; + +// use the same tag as VideoConduit +static const char* logTag ="WebrtcVideoSessionConduit"; + +VideoCodecStatistics::VideoCodecStatistics(int channel, + ViECodec* codec) : + mChannel(channel), + mSentRawFrames(0), + mPtrViECodec(codec), + mEncoderDroppedFrames(0), + mDecoderDiscardedPackets(0), + mRegisteredEncode(false), + mRegisteredDecode(false), + mReceiveState(kReceiveStateInitial) +#ifdef MOZILLA_INTERNAL_API + , mRecoveredBeforeLoss(0) + , mRecoveredLosses(0) +#endif +{ + MOZ_ASSERT(mPtrViECodec); +} + +VideoCodecStatistics::~VideoCodecStatistics() +{ + if (mRegisteredEncode) { + mPtrViECodec->DeregisterEncoderObserver(mChannel); + } + if (mRegisteredDecode) { + mPtrViECodec->DeregisterDecoderObserver(mChannel); + } +} + +void VideoCodecStatistics::Register(bool encoder) +{ + if (encoder && !mRegisteredEncode) { + mPtrViECodec->RegisterEncoderObserver(mChannel, *this); + mRegisteredEncode = true; + } else if (!encoder && !mRegisteredDecode) { + mPtrViECodec->RegisterDecoderObserver(mChannel, *this); + mRegisteredDecode = true; + } +} + +void VideoCodecStatistics::OutgoingRate(const int video_channel, + const uint32_t framerate, + const uint32_t bitrate) +{ + unsigned int keyFrames, deltaFrames; + mPtrViECodec->GetSendCodecStatistics(video_channel, keyFrames, deltaFrames); + uint32_t dropped = mSentRawFrames - (keyFrames + deltaFrames); + CSFLogDebug(logTag, + "encoder statistics - framerate: %u, bitrate: %u, dropped frames: %u", + framerate, bitrate, dropped); + mEncoderBitRate.Push(bitrate); + mEncoderFps.Push(framerate); + mEncoderDroppedFrames += dropped; +} + +void VideoCodecStatistics::IncomingCodecChanged(const int video_channel, + const VideoCodec& video_codec) +{ + CSFLogDebug(logTag, + "channel %d change codec to \"%s\" ", + video_channel, video_codec.plName); +} + +void VideoCodecStatistics::IncomingRate(const int video_channel, + const unsigned int framerate, + const unsigned int bitrate) +{ + unsigned int discarded = mPtrViECodec->GetDiscardedPackets(video_channel); + CSFLogDebug(logTag, + "decoder statistics - framerate: %u, bitrate: %u, discarded packets %u", + framerate, bitrate, discarded); + mDecoderBitRate.Push(bitrate); + mDecoderFps.Push(framerate); + mDecoderDiscardedPackets += discarded; +} + +void VideoCodecStatistics::ReceiveStateChange(const int aChannel, + VideoReceiveState aState) +{ + CSFLogDebug(logTag,"New state for %d: %d (was %d)", aChannel, aState, mReceiveState); +#ifdef MOZILLA_INTERNAL_API + if (mFirstDecodeTime.IsNull()) { + mFirstDecodeTime = TimeStamp::Now(); + } + /* + * Invalid transitions: + * WaitingKey -> PreemptiveNACK + * DecodingWithErrors -> PreemptiveNACK + */ + + switch (mReceiveState) { + case kReceiveStateNormal: + case kReceiveStateInitial: + // in a normal state + if (aState != kReceiveStateNormal && aState != kReceiveStateInitial) { + // no longer in a normal state + if (aState != kReceiveStatePreemptiveNACK) { + mReceiveFailureTime = TimeStamp::Now(); + } + } // else Normal<->Initial transition + break; + default: + // not in a normal state + if (aState == kReceiveStateNormal || aState == kReceiveStateInitial) { + + if (mReceiveState == kReceiveStatePreemptiveNACK) { + mRecoveredBeforeLoss++; + CSFLogError(logTag, "Video error avoided by NACK recovery"); + } else if (!mReceiveFailureTime.IsNull()) { // safety + TimeDuration timeDelta = TimeStamp::Now() - mReceiveFailureTime; + 
CSFLogError(logTag, "Video error duration: %u ms", + static_cast<uint32_t>(timeDelta.ToMilliseconds())); + Telemetry::Accumulate(Telemetry::WEBRTC_VIDEO_ERROR_RECOVERY_MS, + static_cast<uint32_t>(timeDelta.ToMilliseconds())); + + mRecoveredLosses++; // to calculate losses per minute + mTotalLossTime += timeDelta; // To calculate % time in recovery + } + } // else non-Normal to different non-normal transition + break; + } + +#endif + + mReceiveState = aState; +} + +void VideoCodecStatistics::EndOfCallStats() +{ +#ifdef MOZILLA_INTERNAL_API + if (!mFirstDecodeTime.IsNull()) { + TimeDuration callDelta = TimeStamp::Now() - mFirstDecodeTime; + if (callDelta.ToSeconds() != 0) { + uint32_t recovered_per_min = mRecoveredBeforeLoss/(callDelta.ToSeconds()/60); + CSFLogError(logTag, "Video recovery before error per min %u", recovered_per_min); + Telemetry::Accumulate(Telemetry::WEBRTC_VIDEO_RECOVERY_BEFORE_ERROR_PER_MIN, + recovered_per_min); + uint32_t err_per_min = mRecoveredLosses/(callDelta.ToSeconds()/60); + CSFLogError(logTag, "Video recovery after error per min %u", err_per_min); + Telemetry::Accumulate(Telemetry::WEBRTC_VIDEO_RECOVERY_AFTER_ERROR_PER_MIN, + err_per_min); + float percent = (mTotalLossTime.ToSeconds()*100)/callDelta.ToSeconds(); + CSFLogError(logTag, "Video error time percentage %f%%", percent); + Telemetry::Accumulate(Telemetry::WEBRTC_VIDEO_DECODE_ERROR_TIME_PERMILLE, + static_cast<uint32_t>(percent*10)); + } + } +#endif +} + +void VideoCodecStatistics::SentFrame() +{ + mSentRawFrames++; +} + +void VideoCodecStatistics::Dump() +{ + Dump(mEncoderBitRate, "encoder bitrate"); + Dump(mEncoderFps, "encoder fps"); + Dump(mDecoderBitRate, "decoder bitrate"); + Dump(mDecoderFps, "decoder fps"); +} + +void VideoCodecStatistics::Dump(RunningStat& s, const char *name) +{ + CSFLogDebug(logTag, + "%s, mean: %f, variance: %f, standard deviation: %f", + name, s.Mean(), s.Variance(), s.StandardDeviation()); +} diff --git a/media/webrtc/signaling/src/media-conduit/CodecStatistics.h b/media/webrtc/signaling/src/media-conduit/CodecStatistics.h new file mode 100644 index 000000000..ab81a6f33 --- /dev/null +++ b/media/webrtc/signaling/src/media-conduit/CodecStatistics.h @@ -0,0 +1,111 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ +#ifndef CODEC_STATISTICS_H_ +#define CODEC_STATISTICS_H_ +#include <math.h> + +#include "nsTArray.h" +#include "nsISupportsImpl.h" +#include "mozilla/TimeStamp.h" +#include "webrtc/common_types.h" +#include "webrtc/video_engine/include/vie_codec.h" +#include "MediaEngineWrapper.h" +#include "RunningStat.h" + +namespace mozilla { + +// Statistics-gathering observer for Video Encoder and Decoder + +class VideoCodecStatistics : public webrtc::ViEEncoderObserver + , public webrtc::ViEDecoderObserver +{ +public: + VideoCodecStatistics(int channel, webrtc::ViECodec* vieCodec); + ~VideoCodecStatistics(); + void Register(bool encoder); + + void SentFrame(); + virtual void OutgoingRate(const int video_channel, + const unsigned int framerate, const unsigned int bitrate) override; + + virtual void IncomingCodecChanged(const int video_channel, + const webrtc::VideoCodec& video_codec) override; + + virtual void IncomingRate(const int video_channel, + const unsigned int framerate, + const unsigned int bitrate) override; + + void ReceiveStateChange(const int video_channel, webrtc::VideoReceiveState state) override; + + void EndOfCallStats(); + + virtual void RequestNewKeyFrame(const int video_channel) override {}; + + virtual void SuspendChange(int video_channel, bool is_suspended) override {}; + virtual void DecoderTiming(int decode_ms, + int max_decode_ms, + int current_delay_ms, + int target_delay_ms, + int jitter_buffer_ms, + int min_playout_delay_ms, + int render_delay_ms) override {} + + bool GetEncoderStats(double* framerateMean, + double* framerateStdDev, + double* bitrateMean, + double* bitrateStdDev, + uint32_t* droppedFrames) + { + *framerateMean = mEncoderFps.Mean(); + *framerateStdDev = mEncoderFps.StandardDeviation(); + *bitrateMean = mEncoderBitRate.Mean(); + *bitrateStdDev = mEncoderBitRate.StandardDeviation(); + *droppedFrames = mEncoderDroppedFrames; + return true; + } + + bool GetDecoderStats(double* framerateMean, + double* framerateStdDev, + double* bitrateMean, + double* bitrateStdDev, + uint32_t* discardedPackets) + { + *framerateMean = mDecoderFps.Mean(); + *framerateStdDev = mDecoderFps.StandardDeviation(); + *bitrateMean = mDecoderBitRate.Mean(); + *bitrateStdDev = mDecoderBitRate.StandardDeviation(); + *discardedPackets = mDecoderDiscardedPackets; + return true; + } + + void Dump(); +private: + void Dump(RunningStat& s, const char *name); + + int mChannel; + uint32_t mSentRawFrames; + ScopedCustomReleasePtr<webrtc::ViECodec> mPtrViECodec; // back-pointer + + RunningStat mEncoderBitRate; + RunningStat mEncoderFps; + uint32_t mEncoderDroppedFrames; + RunningStat mDecoderBitRate; + RunningStat mDecoderFps; + uint32_t mDecoderDiscardedPackets; + bool mRegisteredEncode; + bool mRegisteredDecode; + + webrtc::VideoReceiveState mReceiveState; +#ifdef MOZILLA_INTERNAL_API + TimeStamp mFirstDecodeTime; + TimeStamp mReceiveFailureTime; + TimeDuration mTotalLossTime; + uint32_t mRecoveredBeforeLoss; + uint32_t mRecoveredLosses; +#endif +}; + +} + +#endif //CODEC_STATISTICS_H_ diff --git a/media/webrtc/signaling/src/media-conduit/GmpVideoCodec.cpp b/media/webrtc/signaling/src/media-conduit/GmpVideoCodec.cpp new file mode 100644 index 000000000..0c4d81e44 --- /dev/null +++ b/media/webrtc/signaling/src/media-conduit/GmpVideoCodec.cpp @@ -0,0 +1,18 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "WebrtcGmpVideoCodec.h" +#include "GmpVideoCodec.h" + +namespace mozilla { + +VideoEncoder* GmpVideoCodec::CreateEncoder() { + return static_cast<VideoEncoder*>(new WebrtcVideoEncoderProxy()); +} + +VideoDecoder* GmpVideoCodec::CreateDecoder() { + return static_cast<VideoDecoder*>(new WebrtcVideoDecoderProxy()); +} + +} diff --git a/media/webrtc/signaling/src/media-conduit/GmpVideoCodec.h b/media/webrtc/signaling/src/media-conduit/GmpVideoCodec.h new file mode 100644 index 000000000..340150409 --- /dev/null +++ b/media/webrtc/signaling/src/media-conduit/GmpVideoCodec.h @@ -0,0 +1,19 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef GMPVIDEOCODEC_H_ +#define GMPVIDEOCODEC_H_ + +#include "MediaConduitInterface.h" + +namespace mozilla { +class GmpVideoCodec { + public: + static VideoEncoder* CreateEncoder(); + static VideoDecoder* CreateDecoder(); +}; + +} + +#endif diff --git a/media/webrtc/signaling/src/media-conduit/MediaCodecVideoCodec.cpp b/media/webrtc/signaling/src/media-conduit/MediaCodecVideoCodec.cpp new file mode 100644 index 000000000..0c6c2fdde --- /dev/null +++ b/media/webrtc/signaling/src/media-conduit/MediaCodecVideoCodec.cpp @@ -0,0 +1,31 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "CSFLog.h" +#include "nspr.h" + +#include "WebrtcMediaCodecVP8VideoCodec.h" +#include "MediaCodecVideoCodec.h" + +namespace mozilla { + +static const char* logTag ="MediaCodecVideoCodec"; + +VideoEncoder* MediaCodecVideoCodec::CreateEncoder(CodecType aCodecType) { + CSFLogDebug(logTag, "%s ", __FUNCTION__); + if (aCodecType == CODEC_VP8) { + return new WebrtcMediaCodecVP8VideoEncoder(); + } + return nullptr; +} + +VideoDecoder* MediaCodecVideoCodec::CreateDecoder(CodecType aCodecType) { + CSFLogDebug(logTag, "%s ", __FUNCTION__); + if (aCodecType == CODEC_VP8) { + return new WebrtcMediaCodecVP8VideoDecoder(); + } + return nullptr; +} + +} diff --git a/media/webrtc/signaling/src/media-conduit/MediaCodecVideoCodec.h b/media/webrtc/signaling/src/media-conduit/MediaCodecVideoCodec.h new file mode 100644 index 000000000..50dde8211 --- /dev/null +++ b/media/webrtc/signaling/src/media-conduit/MediaCodecVideoCodec.h @@ -0,0 +1,31 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef MediaCodecVideoCodec_h__ +#define MediaCodecVideoCodec_h__ + +#include "MediaConduitInterface.h" + +namespace mozilla { +class MediaCodecVideoCodec { + public: + enum CodecType { + CODEC_VP8, + }; + /** + * Create encoder object for codec type |aCodecType|. Return |nullptr| when + * failed. + */ + static VideoEncoder* CreateEncoder(CodecType aCodecType); + + /** + * Create decoder object for codec type |aCodecType|. Return |nullptr| when + * failed. 
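+   *
+   * Illustrative usage (a sketch only; |conduit| and |config| are assumed to
+   * come from the caller, and error handling is elided):
+   *   VideoDecoder* decoder =
+   *     MediaCodecVideoCodec::CreateDecoder(MediaCodecVideoCodec::CODEC_VP8);
+   *   if (decoder) {
+   *     conduit->SetExternalRecvCodec(config, decoder);
+   *   }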
+   */
+  static VideoDecoder* CreateDecoder(CodecType aCodecType);
+};
+
+}
+
+#endif // MediaCodecVideoCodec_h__
diff --git a/media/webrtc/signaling/src/media-conduit/MediaConduitErrors.h b/media/webrtc/signaling/src/media-conduit/MediaConduitErrors.h
new file mode 100755
index 000000000..3709d59a0
--- /dev/null
+++ b/media/webrtc/signaling/src/media-conduit/MediaConduitErrors.h
@@ -0,0 +1,48 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+
+#ifndef MEDIA_SESSION_ERRORS_H_
+#define MEDIA_SESSION_ERRORS_H_
+
+namespace mozilla
+{
+enum MediaConduitErrorCode
+{
+kMediaConduitNoError = 0,              // 0 for success; greater than 0 implies error
+kMediaConduitSessionNotInited = 10100, // Session not initialized. 10100 serves as
+                                       // base for the conduit errors
+kMediaConduitMalformedArgument,        // Malformed input to Conduit API
+kMediaConduitCaptureError,             // WebRTC capture APIs failed
+kMediaConduitInvalidSendCodec,         // Wrong Send codec
+kMediaConduitInvalidReceiveCodec,      // Wrong Recv Codec
+kMediaConduitCodecInUse,               // Already applied Codec
+kMediaConduitInvalidRenderer,          // Null or Wrong Renderer object
+kMediaConduitRendererFail,             // AddRenderer called multiple times
+kMediaConduitSendingAlready,           // Engine already transmitting
+kMediaConduitReceivingAlready,         // Engine already receiving
+kMediaConduitTransportRegistrationFail,// Null or wrong transport interface
+kMediaConduitInvalidTransport,         // Null or wrong transport interface
+kMediaConduitChannelError,             // Configuration Error
+kMediaConduitSocketError,              // Media Engine transport socket error
+kMediaConduitRTPRTCPModuleError,       // Couldn't start RTP/RTCP processing
+kMediaConduitRTPProcessingFailed,      // Processing incoming RTP frame failed
+kMediaConduitUnknownError,             // More information can be found in logs
+kMediaConduitExternalRecordingError,   // Couldn't start external recording
+kMediaConduitRecordingError,           // Runtime recording error
+kMediaConduitExternalPlayoutError,     // Couldn't start external playout
+kMediaConduitPlayoutError,             // Runtime playout error
+kMediaConduitMTUError,                 // Can't set MTU
+kMediaConduitRTCPStatusError,          // Can't set RTCP mode
+kMediaConduitKeyFrameRequestError,     // Can't set KeyFrameRequest mode
+kMediaConduitNACKStatusError,          // Can't set NACK mode
+kMediaConduitTMMBRStatusError,         // Can't set TMMBR mode
+kMediaConduitFECStatusError,           // Can't set FEC mode
+kMediaConduitHybridNACKFECStatusError  // Can't set Hybrid NACK / FEC mode
+};
+
+}
+
+#endif
+
diff --git a/media/webrtc/signaling/src/media-conduit/MediaConduitInterface.h b/media/webrtc/signaling/src/media-conduit/MediaConduitInterface.h
new file mode 100755
index 000000000..05c34fea0
--- /dev/null
+++ b/media/webrtc/signaling/src/media-conduit/MediaConduitInterface.h
@@ -0,0 +1,495 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/.
+ */
+
+#ifndef MEDIA_CONDUIT_ABSTRACTION_
+#define MEDIA_CONDUIT_ABSTRACTION_
+
+#include "nsISupportsImpl.h"
+#include "nsXPCOM.h"
+#include "nsDOMNavigationTiming.h"
+#include "mozilla/RefPtr.h"
+#include "CodecConfig.h"
+#include "VideoTypes.h"
+#include "MediaConduitErrors.h"
+
+#include "ImageContainer.h"
+
+#include "webrtc/common_types.h"
+namespace webrtc {
+class I420VideoFrame;
+}
+
+#include <vector>
+
+namespace mozilla {
+/**
+ * Abstract interface for transporting RTP packets - audio/video.
+ * The consumers of this interface are responsible for passing in
+ * the RTP-packetized media packets.
+ */
+class TransportInterface
+{
+protected:
+  virtual ~TransportInterface() {}
+
+public:
+  /**
+   * RTP Transport Function to be implemented by concrete transport implementation
+   * @param data : RTP Packet (audio/video) to be transported
+   * @param len  : Length of the media packet
+   * @result     : NS_OK on success, NS_ERROR_FAILURE otherwise
+   */
+  virtual nsresult SendRtpPacket(const void* data, int len) = 0;
+
+  /**
+   * RTCP Transport Function to be implemented by concrete transport implementation
+   * @param data : RTCP Packet to be transported
+   * @param len  : Length of the RTCP packet
+   * @result     : NS_OK on success, NS_ERROR_FAILURE otherwise
+   */
+  virtual nsresult SendRtcpPacket(const void* data, int len) = 0;
+  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(TransportInterface)
+};
+
+/**
+ * This class wraps an image object for the VideoRenderer::RenderVideoFrame()
+ * callback implementation to use for rendering.
+ */
+class ImageHandle
+{
+public:
+  explicit ImageHandle(layers::Image* image) : mImage(image) {}
+
+  const RefPtr<layers::Image>& GetImage() const { return mImage; }
+
+private:
+  RefPtr<layers::Image> mImage;
+};
+
+/**
+ * 1. Abstract renderer for video data.
+ * 2. This class acts as an abstract interface between the video engine and
+ *    a video-engine-agnostic renderer implementation.
+ * 3. A concrete implementation of this interface is responsible for
+ *    processing and/or rendering the obtained raw video frame to an
+ *    appropriate output, say, <video>.
+ */
+class VideoRenderer
+{
+protected:
+  virtual ~VideoRenderer() {}
+
+public:
+  /**
+   * Callback function reporting any change in the video-frame dimensions
+   * @param width: current width of the video @ decoder
+   * @param height: current height of the video @ decoder
+   * @param number_of_streams: number of participating video streams
+   */
+  virtual void FrameSizeChange(unsigned int width,
+                               unsigned int height,
+                               unsigned int number_of_streams) = 0;
+
+  /**
+   * Callback function reporting a decoded I420 frame for processing.
+   * @param buffer: pointer to the decoded video frame
+   * @param buffer_size: size of the decoded frame
+   * @param time_stamp: decoder timestamp, typically 90 kHz as per RTP
+   * @param render_time: wall-clock time at the decoder for synchronization
+   *                     purposes, in milliseconds
+   * @param handle: opaque handle for the image object of the decoded video frame.
+   * NOTE: If the decoded video frame is passed through |buffer|, it is the
+   * responsibility of the concrete implementations of this class to own a copy
+   * of the frame if it is needed longer than the scope of this callback.
+   * Such implementations should be quick in processing the frames and return
+   * immediately.
+   * On the other hand, if the decoded video frame is passed through |handle|,
+   * the implementations should keep a reference to the (ref-counted) image
+   * object inside until it's no longer needed.
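+   *
+   * Illustrative sketch of the handle-based path (all names hypothetical):
+   *   void MyRenderer::RenderVideoFrame(const unsigned char* aBuffer,
+   *                                     size_t aSize, uint32_t aTimeStamp,
+   *                                     int64_t aRenderTime,
+   *                                     const ImageHandle& aHandle) {
+   *     if (!aBuffer) {
+   *       mImage = aHandle.GetImage(); // ref-counted, safe to hold
+   *     }
+   *   }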
+   */
+  virtual void RenderVideoFrame(const unsigned char* buffer,
+                                size_t buffer_size,
+                                uint32_t time_stamp,
+                                int64_t render_time,
+                                const ImageHandle& handle) = 0;
+  virtual void RenderVideoFrame(const unsigned char* buffer,
+                                size_t buffer_size,
+                                uint32_t y_stride,
+                                uint32_t cbcr_stride,
+                                uint32_t time_stamp,
+                                int64_t render_time,
+                                const ImageHandle& handle) = 0;
+
+  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(VideoRenderer)
+};
+
+
+/**
+ * Generic interface for representing an Audio/Video Session.
+ * A MediaSession conduit is identified by 2 main components:
+ * 1. An attached Transport Interface for inbound and outbound RTP transport
+ * 2. An attached Renderer Interface for rendering media data off the network
+ * This class hides specifics of the Media-Engine implementation from the
+ * consumers of this interface.
+ * It also provides the codec configuration API for the media sent and received.
+ */
+class MediaSessionConduit
+{
+protected:
+  virtual ~MediaSessionConduit() {}
+
+public:
+  enum Type { AUDIO, VIDEO };
+
+  virtual Type type() const = 0;
+
+  /**
+   * Function triggered on an incoming RTP packet from the remote
+   * endpoint by the transport implementation.
+   * @param data : RTP Packet (audio/video) to be processed
+   * @param len  : Length of the media packet
+   * Obtained packets are passed to the Media-Engine for further
+   * processing, say, decoding.
+   */
+  virtual MediaConduitErrorCode ReceivedRTPPacket(const void *data, int len) = 0;
+
+  /**
+   * Function triggered on an incoming RTCP packet from the remote
+   * endpoint by the transport implementation.
+   * @param data : RTCP Packet (audio/video) to be processed
+   * @param len  : Length of the media packet
+   * Obtained packets are passed to the Media-Engine for further
+   * processing, say, decoding.
+   */
+  virtual MediaConduitErrorCode ReceivedRTCPPacket(const void *data, int len) = 0;
+
+  virtual MediaConduitErrorCode StopTransmitting() = 0;
+  virtual MediaConduitErrorCode StartTransmitting() = 0;
+  virtual MediaConduitErrorCode StopReceiving() = 0;
+  virtual MediaConduitErrorCode StartReceiving() = 0;
+
+
+  /**
+   * Function to attach the transmitter transport end-point of the Media conduit.
+   * @param aTransport: Reference to the concrete transport implementation.
+   * When nullptr, unsets the transmitter transport endpoint.
+   * Note: Multiple invocations of this call replace the existing transport
+   * with the new one.
+   * Note: This transport is used for RTP, and for RTCP if no receiver transport
+   * is set. In the future, we should ensure that RTCP sender reports use this
+   * regardless of whether the receiver transport is set.
+   */
+  virtual MediaConduitErrorCode SetTransmitterTransport(RefPtr<TransportInterface> aTransport) = 0;
+
+  /**
+   * Function to attach the receiver transport end-point of the Media conduit.
+   * @param aTransport: Reference to the concrete transport implementation.
+   * When nullptr, unsets the receiver transport endpoint.
+   * Note: Multiple invocations of this call replace the existing transport
+   * with the new one.
+   * Note: This transport is used for RTCP.
+   * Note: In the future, we should avoid using this for RTCP sender reports.
+   */
+  virtual MediaConduitErrorCode SetReceiverTransport(RefPtr<TransportInterface> aTransport) = 0;
+
+  virtual bool SetLocalSSRC(unsigned int ssrc) = 0;
+  virtual bool GetLocalSSRC(unsigned int* ssrc) = 0;
+  virtual bool GetRemoteSSRC(unsigned int* ssrc) = 0;
+  virtual bool SetLocalCNAME(const char* cname) = 0;
+
+  /**
+   * Functions returning stats needed by the w3c stats model.
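+   *
+   * Illustrative call pattern (a sketch; |conduit| comes from the caller):
+   *   double frMean, frDev, brMean, brDev;
+   *   uint32_t dropped;
+   *   if (conduit->GetVideoEncoderStats(&frMean, &frDev, &brMean, &brDev,
+   *                                     &dropped)) {
+   *     // copy the values into the stats report being assembled
+   *   }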
+   */
+  virtual bool GetVideoEncoderStats(double* framerateMean,
+                                    double* framerateStdDev,
+                                    double* bitrateMean,
+                                    double* bitrateStdDev,
+                                    uint32_t* droppedFrames) = 0;
+  virtual bool GetVideoDecoderStats(double* framerateMean,
+                                    double* framerateStdDev,
+                                    double* bitrateMean,
+                                    double* bitrateStdDev,
+                                    uint32_t* discardedPackets) = 0;
+  virtual bool GetAVStats(int32_t* jitterBufferDelayMs,
+                          int32_t* playoutBufferDelayMs,
+                          int32_t* avSyncOffsetMs) = 0;
+  virtual bool GetRTPStats(unsigned int* jitterMs,
+                           unsigned int* cumulativeLost) = 0;
+  virtual bool GetRTCPReceiverReport(DOMHighResTimeStamp* timestamp,
+                                     uint32_t* jitterMs,
+                                     uint32_t* packetsReceived,
+                                     uint64_t* bytesReceived,
+                                     uint32_t* cumulativeLost,
+                                     int32_t* rttMs) = 0;
+  virtual bool GetRTCPSenderReport(DOMHighResTimeStamp* timestamp,
+                                   unsigned int* packetsSent,
+                                   uint64_t* bytesSent) = 0;
+
+  virtual uint64_t CodecPluginID() = 0;
+
+  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaSessionConduit)
+
+};
+
+// Abstract base classes for external encoder/decoder.
+class CodecPluginID
+{
+public:
+  virtual ~CodecPluginID() {}
+
+  virtual uint64_t PluginID() const = 0;
+};
+
+class VideoEncoder : public CodecPluginID
+{
+public:
+  virtual ~VideoEncoder() {}
+};
+
+class VideoDecoder : public CodecPluginID
+{
+public:
+  virtual ~VideoDecoder() {}
+};
+
+/**
+ * MediaSessionConduit for video
+ * Refer to the comments on MediaSessionConduit above for overall
+ * information
+ */
+class VideoSessionConduit : public MediaSessionConduit
+{
+public:
+  /**
+   * Factory function to create and initialize a Video Conduit Session
+   * return: Concrete VideoSessionConduit object, or nullptr in the case
+   *         of failure
+   */
+  static RefPtr<VideoSessionConduit> Create();
+
+  enum FrameRequestType
+  {
+    FrameRequestNone,
+    FrameRequestFir,
+    FrameRequestPli,
+    FrameRequestUnknown
+  };
+
+  VideoSessionConduit() : mFrameRequestMethod(FrameRequestNone),
+                          mUsingNackBasic(false),
+                          mUsingTmmbr(false),
+                          mUsingFEC(false) {}
+
+  virtual ~VideoSessionConduit() {}
+
+  virtual Type type() const { return VIDEO; }
+
+  /**
+   * Function to attach the Renderer end-point of the Media-Video conduit.
+   * @param aRenderer : Reference to the concrete video renderer implementation.
+   * Note: Multiple invocations of this API shall remove an existing renderer
+   * and attach the new one to the conduit.
+   */
+  virtual MediaConduitErrorCode AttachRenderer(RefPtr<VideoRenderer> aRenderer) = 0;
+  virtual void DetachRenderer() = 0;
+
+  /**
+   * Function to deliver a captured video frame for encoding and transport
+   * @param video_frame: pointer to the captured video frame.
+   * @param video_frame_length: size of the frame
+   * @param width, height: dimensions of the frame
+   * @param video_type: type of the video frame - I420, RAW
+   * @param capture_time: timestamp when the frame was captured.
+   *        if 0, the timestamp is automatically generated
+   * NOTE: ConfigureSendMediaCodec() MUST be called before this function can
+   *       be invoked. This ensures the inserted video frames can be
+   *       transmitted by the conduit.
+   */
+  virtual MediaConduitErrorCode SendVideoFrame(unsigned char* video_frame,
+                                               unsigned int video_frame_length,
+                                               unsigned short width,
+                                               unsigned short height,
+                                               VideoType video_type,
+                                               uint64_t capture_time) = 0;
+  virtual MediaConduitErrorCode SendVideoFrame(webrtc::I420VideoFrame& frame) = 0;
+
+  virtual MediaConduitErrorCode ConfigureCodecMode(webrtc::VideoCodecMode) = 0;
+  /**
+   * Function to configure the send codec for the video session
+   * @param sendSessionConfig: CodecConfiguration
+   * @result: On success, the video engine is configured with the passed-in
+   *          codec for send; on failure, video engine transmit functionality
+   *          is disabled.
+   * NOTE: This API can be invoked multiple times. Invoking this API may
+   *       involve restarting the transmission sub-system on the engine.
+   *
+   */
+  virtual MediaConduitErrorCode ConfigureSendMediaCodec(const VideoCodecConfig* sendSessionConfig) = 0;
+
+  /**
+   * Function to configure the list of receive codecs for the video session
+   * @param recvCodecConfigList: list of codec configurations
+   * NOTE: This API can be invoked multiple times. Invoking this API may
+   *       involve restarting the reception sub-system on the engine.
+   *
+   */
+  virtual MediaConduitErrorCode ConfigureRecvMediaCodecs(
+    const std::vector<VideoCodecConfig* >& recvCodecConfigList) = 0;
+
+  /**
+   * Set an external encoder
+   * @param encoder
+   * @result: on success, we will use the specified encoder
+   */
+  virtual MediaConduitErrorCode SetExternalSendCodec(VideoCodecConfig* config,
+                                                     VideoEncoder* encoder) = 0;
+
+  /**
+   * Set an external decoder
+   * @param decoder
+   * @result: on success, we will use the specified decoder
+   */
+  virtual MediaConduitErrorCode SetExternalRecvCodec(VideoCodecConfig* config,
+                                                     VideoDecoder* decoder) = 0;
+
+  /**
+   * Function to enable the RTP Stream ID (RID) extension
+   * @param enabled: enable extension
+   * @param id: id to be used for this rtp header extension
+   * NOTE: See VideoConduit for more information
+   */
+  virtual MediaConduitErrorCode EnableRTPStreamIdExtension(bool enabled, uint8_t id) = 0;
+
+  /**
+   * These methods allow unit tests to double-check that the
+   * max-fs and max-fr related settings are as expected.
+   */
+  virtual unsigned short SendingWidth() = 0;
+
+  virtual unsigned short SendingHeight() = 0;
+
+  virtual unsigned int SendingMaxFs() = 0;
+
+  virtual unsigned int SendingMaxFr() = 0;
+
+  /**
+   * These methods allow unit tests to double-check that the
+   * rtcp-fb settings are as expected.
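+   *
+   * e.g. a test might assert (a sketch; gtest-style macros assumed):
+   *   EXPECT_EQ(VideoSessionConduit::FrameRequestPli,
+   *             conduit->FrameRequestMethod());
+   *   EXPECT_TRUE(conduit->UsingNackBasic());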
+   */
+  FrameRequestType FrameRequestMethod() const {
+    return mFrameRequestMethod;
+  }
+
+  bool UsingNackBasic() const {
+    return mUsingNackBasic;
+  }
+
+  bool UsingTmmbr() const {
+    return mUsingTmmbr;
+  }
+
+  bool UsingFEC() const {
+    return mUsingFEC;
+  }
+
+  protected:
+  /* RTCP feedback settings, for unit testing purposes */
+  FrameRequestType mFrameRequestMethod;
+  bool mUsingNackBasic;
+  bool mUsingTmmbr;
+  bool mUsingFEC;
+};
+
+/**
+ * MediaSessionConduit for audio
+ * Refer to the comments on MediaSessionConduit above for overall
+ * information
+ */
+class AudioSessionConduit : public MediaSessionConduit
+{
+public:
+
+  /**
+   * Factory function to create and initialize an Audio Conduit Session
+   * return: Concrete AudioSessionConduit object, or nullptr in the case
+   *         of failure
+   */
+  static RefPtr<AudioSessionConduit> Create();
+
+  virtual ~AudioSessionConduit() {}
+
+  virtual Type type() const { return AUDIO; }
+
+
+  /**
+   * Function to deliver an externally captured audio sample for encoding and
+   * transport
+   * @param audioData [in]: Pointer to an array containing a frame of audio
+   * @param lengthSamples [in]: Length of the audio frame in samples, in
+   *                      multiples of 10 milliseconds.
+   *                      Ex: Frame length is 160, 320, 440 for 16, 32, 44 kHz
+   *                      sampling rates respectively.
+   *                      audioData[] is lengthSamples in size;
+   *                      say, for a 16 kHz sampling rate, audioData[] should
+   *                      contain 160 samples of 16 bits each for a 10 ms
+   *                      audio frame.
+   * @param samplingFreqHz [in]: Frequency/rate of the sampling in Hz (16000, 32000, ...)
+   * @param capture_delay [in]: Approximate delay from recording until it is
+   *                      delivered to VoiceEngine, in milliseconds.
+   * NOTE: ConfigureSendMediaCodec() SHOULD be called before this function can
+   *       be invoked. This ensures the inserted audio samples can be
+   *       transmitted by the conduit.
+   *
+   */
+  virtual MediaConduitErrorCode SendAudioFrame(const int16_t audioData[],
+                                               int32_t lengthSamples,
+                                               int32_t samplingFreqHz,
+                                               int32_t capture_delay) = 0;
+
+  /**
+   * Function to grab a decoded audio sample of length 10 milliseconds from
+   * the media engine for rendering / playout.
+   *
+   * @param speechData [out]: Pointer to an array to which a 10 ms frame of audio will be copied
+   * @param samplingFreqHz [in]: Frequency of the sampling for playback in Hertz (16000, 32000, ...)
+   * @param capture_delay [in]: Estimated time between reading of the samples and rendering/playback
+   * @param lengthSamples [out]: Will contain the length of the audio frame in
+   *                      samples at return.
+   *                      Ex: A value of 160 implies 160 samples, each of
+   *                      16 bits, were copied into speechData.
+   * NOTE: This function should be invoked every 10 milliseconds for the best
+   *       performance.
+   * NOTE: ConfigureRecvMediaCodecs() SHOULD be called before this function can
+   *       be invoked. This ensures the decoded samples are ready for reading.
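+   *
+   * Illustrative 10 ms playout pull at 16 kHz (a sketch; |conduit| comes
+   * from the caller):
+   *   int16_t speech[160];
+   *   int lengthSamples = 0;
+   *   conduit->GetAudioFrame(speech, 16000, 0, lengthSamples);
+   *   // on success, lengthSamples == 160 (16000 Hz * 10 ms)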
+ * + */ + virtual MediaConduitErrorCode GetAudioFrame(int16_t speechData[], + int32_t samplingFreqHz, + int32_t capture_delay, + int& lengthSamples) = 0; + + /** + * Function to configure send codec for the audio session + * @param sendSessionConfig: CodecConfiguration + * NOTE: See VideoConduit for more information + */ + + virtual MediaConduitErrorCode ConfigureSendMediaCodec(const AudioCodecConfig* sendCodecConfig) = 0; + + /** + * Function to configure list of receive codecs for the audio session + * @param sendSessionConfig: CodecConfiguration + * NOTE: See VideoConduit for more information + */ + virtual MediaConduitErrorCode ConfigureRecvMediaCodecs( + const std::vector<AudioCodecConfig* >& recvCodecConfigList) = 0; + /** + * Function to enable the audio level extension + * @param enabled: enable extension + * @param id: id to be used for this rtp header extension + * NOTE: See AudioConduit for more information + */ + virtual MediaConduitErrorCode EnableAudioLevelExtension(bool enabled, uint8_t id) = 0; + + virtual bool SetDtmfPayloadType(unsigned char type) = 0; + + virtual bool InsertDTMFTone(int channel, int eventCode, bool outOfBand, + int lengthMs, int attenuationDb) = 0; + +}; +} +#endif diff --git a/media/webrtc/signaling/src/media-conduit/OMXVideoCodec.cpp b/media/webrtc/signaling/src/media-conduit/OMXVideoCodec.cpp new file mode 100644 index 000000000..d46398402 --- /dev/null +++ b/media/webrtc/signaling/src/media-conduit/OMXVideoCodec.cpp @@ -0,0 +1,30 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "OMXVideoCodec.h" + +#ifdef WEBRTC_GONK +#include "WebrtcOMXH264VideoCodec.h" +#endif + +namespace mozilla { + +VideoEncoder* +OMXVideoCodec::CreateEncoder(CodecType aCodecType) +{ + if (aCodecType == CODEC_H264) { + return new WebrtcOMXH264VideoEncoder(); + } + return nullptr; +} + +VideoDecoder* +OMXVideoCodec::CreateDecoder(CodecType aCodecType) { + if (aCodecType == CODEC_H264) { + return new WebrtcOMXH264VideoDecoder(); + } + return nullptr; +} + +} diff --git a/media/webrtc/signaling/src/media-conduit/OMXVideoCodec.h b/media/webrtc/signaling/src/media-conduit/OMXVideoCodec.h new file mode 100644 index 000000000..51df50263 --- /dev/null +++ b/media/webrtc/signaling/src/media-conduit/OMXVideoCodec.h @@ -0,0 +1,32 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef OMX_VIDEO_CODEC_H_ +#define OMX_VIDEO_CODEC_H_ + +#include "MediaConduitInterface.h" + +namespace mozilla { +class OMXVideoCodec { + public: + enum CodecType { + CODEC_H264, + }; + + /** + * Create encoder object for codec type |aCodecType|. Return |nullptr| when + * failed. + */ + static VideoEncoder* CreateEncoder(CodecType aCodecType); + + /** + * Create decoder object for codec type |aCodecType|. Return |nullptr| when + * failed. + */ + static VideoDecoder* CreateDecoder(CodecType aCodecType); +}; + +} + +#endif // OMX_VIDEO_CODEC_H_ diff --git a/media/webrtc/signaling/src/media-conduit/RunningStat.h b/media/webrtc/signaling/src/media-conduit/RunningStat.h new file mode 100644 index 000000000..1d0cdbeca --- /dev/null +++ b/media/webrtc/signaling/src/media-conduit/RunningStat.h @@ -0,0 +1,66 @@ +/* Any copyright is dedicated to the Public Domain. 
+ http://creativecommons.org/publicdomain/zero/1.0/ */ +/* Adapted from "Accurately computing running variance - John D. Cook" + http://www.johndcook.com/standard_deviation.html */ + +#ifndef RUNNING_STAT_H_ +#define RUNNING_STAT_H_ +#include <math.h> + +namespace mozilla { + +class RunningStat +{ +public: + RunningStat() : mN(0) {} + + void Clear() + { + mN = 0; + } + + void Push(double x) + { + mN++; + + // See Knuth TAOCP vol 2, 3rd edition, page 232 + if (mN == 1) + { + mOldM = mNewM = x; + mOldS = 0.0; + } else { + mNewM = mOldM + (x - mOldM) / mN; + mNewS = mOldS + (x - mOldM) * (x - mNewM); + + // set up for next iteration + mOldM = mNewM; + mOldS = mNewS; + } + } + + int NumDataValues() const + { + return mN; + } + + double Mean() const + { + return (mN > 0) ? mNewM : 0.0; + } + + double Variance() const + { + return (mN > 1) ? mNewS / (mN - 1) : 0.0; + } + + double StandardDeviation() const + { + return sqrt(Variance()); + } + +private: + int mN; + double mOldM, mNewM, mOldS, mNewS; +}; +} +#endif //RUNNING_STAT_H_ diff --git a/media/webrtc/signaling/src/media-conduit/VideoConduit.cpp b/media/webrtc/signaling/src/media-conduit/VideoConduit.cpp new file mode 100755 index 000000000..3f0445122 --- /dev/null +++ b/media/webrtc/signaling/src/media-conduit/VideoConduit.cpp @@ -0,0 +1,2129 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "CSFLog.h" +#include "nspr.h" +#include "plstr.h" + +#include "VideoConduit.h" +#include "AudioConduit.h" +#include "nsThreadUtils.h" +#include "LoadManager.h" +#include "YuvStamper.h" +#include "nsServiceManagerUtils.h" +#include "nsIPrefService.h" +#include "nsIPrefBranch.h" +#include "mozilla/media/MediaUtils.h" +#include "mozilla/TemplateLib.h" + +#include "webrtc/common_types.h" +#include "webrtc/common_video/interface/native_handle.h" +#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h" +#include "webrtc/video_engine/include/vie_errors.h" +#include "webrtc/video_engine/vie_defines.h" + +#include "mozilla/Unused.h" + +#ifdef MOZ_WIDGET_ANDROID +#include "AndroidJNIWrapper.h" +#endif + +// for ntohs +#ifdef _MSC_VER +#include "Winsock2.h" +#else +#include <netinet/in.h> +#endif + +#include <algorithm> +#include <math.h> + +#define DEFAULT_VIDEO_MAX_FRAMERATE 30 +#define INVALID_RTP_PAYLOAD 255 //valid payload types are 0 to 127 + +namespace mozilla { + +static const char* logTag ="WebrtcVideoSessionConduit"; + +// 32 bytes is what WebRTC CodecInst expects +const unsigned int WebrtcVideoConduit::CODEC_PLNAME_SIZE = 32; + +/** + * Factory Method for VideoConduit + */ +RefPtr<VideoSessionConduit> +VideoSessionConduit::Create() +{ + NS_ASSERTION(NS_IsMainThread(), "Only call on main thread"); + CSFLogDebug(logTag, "%s ", __FUNCTION__); + + WebrtcVideoConduit* obj = new WebrtcVideoConduit(); + if(obj->Init() != kMediaConduitNoError) + { + CSFLogError(logTag, "%s VideoConduit Init Failed ", __FUNCTION__); + delete obj; + return nullptr; + } + CSFLogDebug(logTag, "%s Successfully created VideoConduit ", __FUNCTION__); + return obj; +} + +WebrtcVideoConduit::WebrtcVideoConduit(): + mVideoEngine(nullptr), + mTransportMonitor("WebrtcVideoConduit"), + mTransmitterTransport(nullptr), + mReceiverTransport(nullptr), + mRenderer(nullptr), + mPtrExtCapture(nullptr), + mEngineTransmitting(false), + mEngineReceiving(false), + mChannel(-1), + mCapId(-1), + mCodecMutex("VideoConduit codec 
db"), + mInReconfig(false), + mLastWidth(0), // forces a check for reconfig at start + mLastHeight(0), + mSendingWidth(0), + mSendingHeight(0), + mReceivingWidth(0), + mReceivingHeight(0), + mSendingFramerate(DEFAULT_VIDEO_MAX_FRAMERATE), + mLastFramerateTenths(DEFAULT_VIDEO_MAX_FRAMERATE*10), + mNumReceivingStreams(1), + mVideoLatencyTestEnable(false), + mVideoLatencyAvg(0), + mMinBitrate(0), + mStartBitrate(0), + mMaxBitrate(0), + mMinBitrateEstimate(0), + mRtpStreamIdEnabled(false), + mRtpStreamIdExtId(0), + mCodecMode(webrtc::kRealtimeVideo) +{} + +WebrtcVideoConduit::~WebrtcVideoConduit() +{ + NS_ASSERTION(NS_IsMainThread(), "Only call on main thread"); + CSFLogDebug(logTag, "%s ", __FUNCTION__); + + // Release AudioConduit first by dropping reference on MainThread, where it expects to be + SyncTo(nullptr); + Destroy(); +} + +bool WebrtcVideoConduit::SetLocalSSRC(unsigned int ssrc) +{ + unsigned int oldSsrc; + if (!GetLocalSSRC(&oldSsrc)) { + MOZ_ASSERT(false, "GetLocalSSRC failed"); + return false; + } + + if (oldSsrc == ssrc) { + return true; + } + + bool wasTransmitting = mEngineTransmitting; + if (StopTransmitting() != kMediaConduitNoError) { + return false; + } + + if (mPtrRTP->SetLocalSSRC(mChannel, ssrc)) { + return false; + } + + if (wasTransmitting) { + if (StartTransmitting() != kMediaConduitNoError) { + return false; + } + } + return true; +} + +bool WebrtcVideoConduit::GetLocalSSRC(unsigned int* ssrc) +{ + return !mPtrRTP->GetLocalSSRC(mChannel, *ssrc); +} + +bool WebrtcVideoConduit::GetRemoteSSRC(unsigned int* ssrc) +{ + return !mPtrRTP->GetRemoteSSRC(mChannel, *ssrc); +} + +bool WebrtcVideoConduit::SetLocalCNAME(const char* cname) +{ + char temp[256]; + strncpy(temp, cname, sizeof(temp) - 1); + temp[sizeof(temp) - 1] = 0; + return !mPtrRTP->SetRTCPCName(mChannel, temp); +} + +bool WebrtcVideoConduit::GetVideoEncoderStats(double* framerateMean, + double* framerateStdDev, + double* bitrateMean, + double* bitrateStdDev, + uint32_t* droppedFrames) +{ + if (!mEngineTransmitting) { + return false; + } + MOZ_ASSERT(mVideoCodecStat); + mVideoCodecStat->GetEncoderStats(framerateMean, framerateStdDev, + bitrateMean, bitrateStdDev, + droppedFrames); + + // See if we need to adjust bandwidth. + // Avoid changing bandwidth constantly; use hysteresis. + + // Note: mLastFramerate is a relaxed Atomic because we're setting it here, and + // reading it on whatever thread calls DeliverFrame/SendVideoFrame. Alternately + // we could use a lock. Note that we don't change it often, and read it once per frame. + // We scale by *10 because mozilla::Atomic<> doesn't do 'double' or 'float'. 
+ double framerate = mLastFramerateTenths/10.0; // fetch once + if (std::abs(*framerateMean - framerate)/framerate > 0.1 && + *framerateMean >= 0.5) { + // unchanged resolution, but adjust bandwidth limits to match camera fps + CSFLogDebug(logTag, "Encoder frame rate changed from %f to %f", + (mLastFramerateTenths/10.0), *framerateMean); + MutexAutoLock lock(mCodecMutex); + mLastFramerateTenths = *framerateMean * 10; + SelectSendResolution(mSendingWidth, mSendingHeight, nullptr); + } + return true; +} + +bool WebrtcVideoConduit::GetVideoDecoderStats(double* framerateMean, + double* framerateStdDev, + double* bitrateMean, + double* bitrateStdDev, + uint32_t* discardedPackets) +{ + if (!mEngineReceiving) { + return false; + } + MOZ_ASSERT(mVideoCodecStat); + mVideoCodecStat->GetDecoderStats(framerateMean, framerateStdDev, + bitrateMean, bitrateStdDev, + discardedPackets); + return true; +} + +bool WebrtcVideoConduit::GetAVStats(int32_t* jitterBufferDelayMs, + int32_t* playoutBufferDelayMs, + int32_t* avSyncOffsetMs) { + return false; +} + +bool WebrtcVideoConduit::GetRTPStats(unsigned int* jitterMs, + unsigned int* cumulativeLost) { + unsigned short fractionLost; + unsigned extendedMax; + int64_t rttMs; + // GetReceivedRTCPStatistics is a poorly named GetRTPStatistics variant + return !mPtrRTP->GetReceivedRTCPStatistics(mChannel, fractionLost, + *cumulativeLost, + extendedMax, + *jitterMs, + rttMs); +} + +bool WebrtcVideoConduit::GetRTCPReceiverReport(DOMHighResTimeStamp* timestamp, + uint32_t* jitterMs, + uint32_t* packetsReceived, + uint64_t* bytesReceived, + uint32_t* cumulativeLost, + int32_t* rttMs) { + uint32_t ntpHigh, ntpLow; + uint16_t fractionLost; + bool result = !mPtrRTP->GetRemoteRTCPReceiverInfo(mChannel, ntpHigh, ntpLow, + *packetsReceived, + *bytesReceived, + jitterMs, + &fractionLost, + cumulativeLost, + rttMs); + if (result) { + *timestamp = NTPtoDOMHighResTimeStamp(ntpHigh, ntpLow); + } + return result; +} + +bool WebrtcVideoConduit::GetRTCPSenderReport(DOMHighResTimeStamp* timestamp, + unsigned int* packetsSent, + uint64_t* bytesSent) { + struct webrtc::SenderInfo senderInfo; + bool result = !mPtrRTP->GetRemoteRTCPSenderInfo(mChannel, &senderInfo); + if (result) { + *timestamp = NTPtoDOMHighResTimeStamp(senderInfo.NTP_timestamp_high, + senderInfo.NTP_timestamp_low); + *packetsSent = senderInfo.sender_packet_count; + *bytesSent = senderInfo.sender_octet_count; + } + return result; +} + +MediaConduitErrorCode +WebrtcVideoConduit::InitMain() +{ +#if defined(MOZILLA_INTERNAL_API) + // already know we must be on MainThread barring unit test weirdness + MOZ_ASSERT(NS_IsMainThread()); + + nsresult rv; + nsCOMPtr<nsIPrefService> prefs = do_GetService("@mozilla.org/preferences-service;1", &rv); + if (!NS_WARN_IF(NS_FAILED(rv))) + { + nsCOMPtr<nsIPrefBranch> branch = do_QueryInterface(prefs); + + if (branch) + { + int32_t temp; + Unused << NS_WARN_IF(NS_FAILED(branch->GetBoolPref("media.video.test_latency", &mVideoLatencyTestEnable))); + if (!NS_WARN_IF(NS_FAILED(branch->GetIntPref("media.peerconnection.video.min_bitrate", &temp)))) + { + if (temp >= 0) { + mMinBitrate = temp; + } + } + if (!NS_WARN_IF(NS_FAILED(branch->GetIntPref("media.peerconnection.video.start_bitrate", &temp)))) + { + if (temp >= 0) { + mStartBitrate = temp; + } + } + if (!NS_WARN_IF(NS_FAILED(branch->GetIntPref("media.peerconnection.video.max_bitrate", &temp)))) + { + if (temp >= 0) { + mMaxBitrate = temp; + } + } + if (mMinBitrate != 0 && mMinBitrate < webrtc::kViEMinCodecBitrate) { + mMinBitrate = 
webrtc::kViEMinCodecBitrate; + } + if (mStartBitrate < mMinBitrate) { + mStartBitrate = mMinBitrate; + } + if (mStartBitrate > mMaxBitrate) { + mStartBitrate = mMaxBitrate; + } + if (!NS_WARN_IF(NS_FAILED(branch->GetIntPref("media.peerconnection.video.min_bitrate_estimate", &temp)))) + { + if (temp >= 0) { + mMinBitrateEstimate = temp; + } + } + bool use_loadmanager = false; + if (!NS_WARN_IF(NS_FAILED(branch->GetBoolPref("media.navigator.load_adapt", &use_loadmanager)))) + { + if (use_loadmanager) { + mLoadManager = LoadManagerBuild(); + } + } + } + } + +#ifdef MOZ_WIDGET_ANDROID + // get the JVM + JavaVM *jvm = jsjni_GetVM(); + + if (webrtc::VideoEngine::SetAndroidObjects(jvm) != 0) { + CSFLogError(logTag, "%s: could not set Android objects", __FUNCTION__); + return kMediaConduitSessionNotInited; + } +#endif +#endif + return kMediaConduitNoError; +} + +/** + * Performs initialization of the MANDATORY components of the Video Engine + */ +MediaConduitErrorCode +WebrtcVideoConduit::Init() +{ + CSFLogDebug(logTag, "%s this=%p", __FUNCTION__, this); + MediaConduitErrorCode result; + // Run code that must run on MainThread first + MOZ_ASSERT(NS_IsMainThread()); + result = InitMain(); + if (result != kMediaConduitNoError) { + return result; + } + + // Per WebRTC APIs below function calls return nullptr on failure + mVideoEngine = webrtc::VideoEngine::Create(); + if(!mVideoEngine) + { + CSFLogError(logTag, "%s Unable to create video engine ", __FUNCTION__); + return kMediaConduitSessionNotInited; + } + + if( !(mPtrViEBase = ViEBase::GetInterface(mVideoEngine))) + { + CSFLogError(logTag, "%s Unable to get video base interface ", __FUNCTION__); + return kMediaConduitSessionNotInited; + } + + if( !(mPtrViECapture = ViECapture::GetInterface(mVideoEngine))) + { + CSFLogError(logTag, "%s Unable to get video capture interface", __FUNCTION__); + return kMediaConduitSessionNotInited; + } + + if( !(mPtrViECodec = ViECodec::GetInterface(mVideoEngine))) + { + CSFLogError(logTag, "%s Unable to get video codec interface ", __FUNCTION__); + return kMediaConduitSessionNotInited; + } + + if( !(mPtrViENetwork = ViENetwork::GetInterface(mVideoEngine))) + { + CSFLogError(logTag, "%s Unable to get video network interface ", __FUNCTION__); + return kMediaConduitSessionNotInited; + } + + if( !(mPtrViERender = ViERender::GetInterface(mVideoEngine))) + { + CSFLogError(logTag, "%s Unable to get video render interface ", __FUNCTION__); + return kMediaConduitSessionNotInited; + } + + mPtrExtCodec = webrtc::ViEExternalCodec::GetInterface(mVideoEngine); + if (!mPtrExtCodec) { + CSFLogError(logTag, "%s Unable to get external codec interface: %d ", + __FUNCTION__,mPtrViEBase->LastError()); + return kMediaConduitSessionNotInited; + } + + if( !(mPtrRTP = webrtc::ViERTP_RTCP::GetInterface(mVideoEngine))) + { + CSFLogError(logTag, "%s Unable to get video RTCP interface ", __FUNCTION__); + return kMediaConduitSessionNotInited; + } + + if ( !(mPtrExtCodec = webrtc::ViEExternalCodec::GetInterface(mVideoEngine))) + { + CSFLogError(logTag, "%s Unable to get external codec interface %d ", + __FUNCTION__, mPtrViEBase->LastError()); + return kMediaConduitSessionNotInited; + } + + CSFLogDebug(logTag, "%s Engine Created: Init'ng the interfaces ",__FUNCTION__); + + if(mPtrViEBase->Init() == -1) + { + CSFLogError(logTag, " %s Video Engine Init Failed %d ",__FUNCTION__, + mPtrViEBase->LastError()); + return kMediaConduitSessionNotInited; + } + + if(mPtrViEBase->CreateChannel(mChannel) == -1) + { + CSFLogError(logTag, " %s Channel creation 
Failed %d ",__FUNCTION__, + mPtrViEBase->LastError()); + return kMediaConduitChannelError; + } + + if(mPtrViENetwork->RegisterSendTransport(mChannel, *this) == -1) + { + CSFLogError(logTag, "%s ViENetwork Failed %d ", __FUNCTION__, + mPtrViEBase->LastError()); + return kMediaConduitTransportRegistrationFail; + } + + if(mPtrViECapture->AllocateExternalCaptureDevice(mCapId, + mPtrExtCapture) == -1) + { + CSFLogError(logTag, "%s Unable to Allocate capture module: %d ", + __FUNCTION__, mPtrViEBase->LastError()); + return kMediaConduitCaptureError; + } + + if(mPtrViECapture->ConnectCaptureDevice(mCapId,mChannel) == -1) + { + CSFLogError(logTag, "%s Unable to Connect capture module: %d ", + __FUNCTION__,mPtrViEBase->LastError()); + return kMediaConduitCaptureError; + } + // Set up some parameters, per juberti. Set MTU. + if(mPtrViENetwork->SetMTU(mChannel, 1200) != 0) + { + CSFLogError(logTag, "%s MTU Failed %d ", __FUNCTION__, + mPtrViEBase->LastError()); + return kMediaConduitMTUError; + } + // Turn on RTCP and loss feedback reporting. + if(mPtrRTP->SetRTCPStatus(mChannel, webrtc::kRtcpCompound_RFC4585) != 0) + { + CSFLogError(logTag, "%s RTCPStatus Failed %d ", __FUNCTION__, + mPtrViEBase->LastError()); + return kMediaConduitRTCPStatusError; + } + + if (mPtrViERender->AddRenderer(mChannel, + webrtc::kVideoI420, + (webrtc::ExternalRenderer*) this) == -1) { + CSFLogError(logTag, "%s Failed to added external renderer ", __FUNCTION__); + return kMediaConduitInvalidRenderer; + } + + if (mLoadManager) { + mPtrViEBase->RegisterCpuOveruseObserver(mChannel, mLoadManager); + mPtrViEBase->SetLoadManager(mLoadManager); + } + + CSFLogError(logTag, "%s Initialization Done", __FUNCTION__); + return kMediaConduitNoError; +} + +void +WebrtcVideoConduit::Destroy() +{ + // The first one of a pair to be deleted shuts down media for both + //Deal with External Capturer + if(mPtrViECapture) + { + mPtrViECapture->DisconnectCaptureDevice(mCapId); + mPtrViECapture->ReleaseCaptureDevice(mCapId); + mPtrExtCapture = nullptr; + } + + if (mPtrExtCodec) { + mPtrExtCodec->Release(); + mPtrExtCodec = NULL; + } + + //Deal with External Renderer + if(mPtrViERender) + { + if(mRenderer) { + mPtrViERender->StopRender(mChannel); + } + mPtrViERender->RemoveRenderer(mChannel); + } + + //Deal with the transport + if(mPtrViENetwork) + { + mPtrViENetwork->DeregisterSendTransport(mChannel); + } + + if(mPtrViEBase) + { + mPtrViEBase->StopSend(mChannel); + mPtrViEBase->StopReceive(mChannel); + mPtrViEBase->DeleteChannel(mChannel); + } + + // mVideoCodecStat has a back-ptr to mPtrViECodec that must be released first + if (mVideoCodecStat) { + mVideoCodecStat->EndOfCallStats(); + } + mVideoCodecStat = nullptr; + // We can't delete the VideoEngine until all these are released! + // And we can't use a Scoped ptr, since the order is arbitrary + mPtrViEBase = nullptr; + mPtrViECapture = nullptr; + mPtrViECodec = nullptr; + mPtrViENetwork = nullptr; + mPtrViERender = nullptr; + mPtrRTP = nullptr; + mPtrExtCodec = nullptr; + + // only one opener can call Delete. Have it be the last to close. + if(mVideoEngine) + { + webrtc::VideoEngine::Delete(mVideoEngine); + } +} + +void +WebrtcVideoConduit::SyncTo(WebrtcAudioConduit *aConduit) +{ + CSFLogDebug(logTag, "%s Synced to %p", __FUNCTION__, aConduit); + + // SyncTo(value) syncs to the AudioConduit, and if already synced replaces + // the current sync target. SyncTo(nullptr) cancels any existing sync and + // releases the strong ref to AudioConduit. 
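+  // Illustrative call sites (a sketch; variable names are hypothetical):
+  //   videoConduit->SyncTo(audioConduit); // start a/v sync with that audio
+  //   videoConduit->SyncTo(nullptr);      // on teardown, drop the sync target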
+ if (aConduit) { + mPtrViEBase->SetVoiceEngine(aConduit->GetVoiceEngine()); + mPtrViEBase->ConnectAudioChannel(mChannel, aConduit->GetChannel()); + // NOTE: this means the VideoConduit will keep the AudioConduit alive! + } else { + mPtrViEBase->DisconnectAudioChannel(mChannel); + mPtrViEBase->SetVoiceEngine(nullptr); + } + + mSyncedTo = aConduit; +} + +MediaConduitErrorCode +WebrtcVideoConduit::AttachRenderer(RefPtr<VideoRenderer> aVideoRenderer) +{ + CSFLogDebug(logTag, "%s ", __FUNCTION__); + + //null renderer + if(!aVideoRenderer) + { + CSFLogError(logTag, "%s NULL Renderer", __FUNCTION__); + MOZ_ASSERT(false); + return kMediaConduitInvalidRenderer; + } + + // This function is called only from main, so we only need to protect against + // modifying mRenderer while any webrtc.org code is trying to use it. + bool wasRendering; + { + ReentrantMonitorAutoEnter enter(mTransportMonitor); + wasRendering = !!mRenderer; + mRenderer = aVideoRenderer; + // Make sure the renderer knows the resolution + mRenderer->FrameSizeChange(mReceivingWidth, + mReceivingHeight, + mNumReceivingStreams); + } + + if (!wasRendering) { + if(mPtrViERender->StartRender(mChannel) == -1) + { + CSFLogError(logTag, "%s Starting the Renderer Failed %d ", __FUNCTION__, + mPtrViEBase->LastError()); + ReentrantMonitorAutoEnter enter(mTransportMonitor); + mRenderer = nullptr; + return kMediaConduitRendererFail; + } + } + + return kMediaConduitNoError; +} + +void +WebrtcVideoConduit::DetachRenderer() +{ + { + ReentrantMonitorAutoEnter enter(mTransportMonitor); + if(mRenderer) + { + mRenderer = nullptr; + } + } + + mPtrViERender->StopRender(mChannel); +} + +MediaConduitErrorCode +WebrtcVideoConduit::SetTransmitterTransport(RefPtr<TransportInterface> aTransport) +{ + CSFLogDebug(logTag, "%s ", __FUNCTION__); + + ReentrantMonitorAutoEnter enter(mTransportMonitor); + // set the transport + mTransmitterTransport = aTransport; + return kMediaConduitNoError; +} + +MediaConduitErrorCode +WebrtcVideoConduit::SetReceiverTransport(RefPtr<TransportInterface> aTransport) +{ + CSFLogDebug(logTag, "%s ", __FUNCTION__); + + ReentrantMonitorAutoEnter enter(mTransportMonitor); + // set the transport + mReceiverTransport = aTransport; + return kMediaConduitNoError; +} +MediaConduitErrorCode +WebrtcVideoConduit::ConfigureCodecMode(webrtc::VideoCodecMode mode) +{ + CSFLogDebug(logTag, "%s ", __FUNCTION__); + mCodecMode = mode; + return kMediaConduitNoError; +} +/** + * Note: Setting the send-codec on the Video Engine will restart the encoder, + * sets up new SSRC and reset RTP_RTCP module with the new codec setting. + * + * Note: this is called from MainThread, and the codec settings are read on + * videoframe delivery threads (i.e in SendVideoFrame(). With + * renegotiation/reconfiguration, this now needs a lock! Alternatively + * changes could be queued until the next frame is delivered using an + * Atomic pointer and swaps. + */ +MediaConduitErrorCode +WebrtcVideoConduit::ConfigureSendMediaCodec(const VideoCodecConfig* codecConfig) +{ + CSFLogDebug(logTag, "%s for %s", __FUNCTION__, codecConfig ? 
codecConfig->mName.c_str() : "<null>"); + bool codecFound = false; + MediaConduitErrorCode condError = kMediaConduitNoError; + int error = 0; //webrtc engine errors + webrtc::VideoCodec video_codec; + std::string payloadName; + + memset(&video_codec, 0, sizeof(video_codec)); + + { + //validate basic params + if((condError = ValidateCodecConfig(codecConfig,true)) != kMediaConduitNoError) + { + return condError; + } + } + + condError = StopTransmitting(); + if (condError != kMediaConduitNoError) { + return condError; + } + + if (mRtpStreamIdEnabled) { + video_codec.ridId = mRtpStreamIdExtId; + } + if (mExternalSendCodec && + codecConfig->mType == mExternalSendCodec->mType) { + CSFLogError(logTag, "%s Configuring External H264 Send Codec", __FUNCTION__); + + // width/height will be overridden on the first frame + video_codec.width = 320; + video_codec.height = 240; +#ifdef MOZ_WEBRTC_OMX + if (codecConfig->mType == webrtc::kVideoCodecH264) { + video_codec.resolution_divisor = 16; + } else { + video_codec.resolution_divisor = 1; // We could try using it to handle odd resolutions + } +#else + video_codec.resolution_divisor = 1; // We could try using it to handle odd resolutions +#endif + video_codec.qpMax = 56; + video_codec.numberOfSimulcastStreams = 1; + video_codec.simulcastStream[0].jsScaleDownBy = + codecConfig->mEncodingConstraints.scaleDownBy; + video_codec.mode = mCodecMode; + + codecFound = true; + } else { + // we should be good here to set the new codec. + for(int idx=0; idx < mPtrViECodec->NumberOfCodecs(); idx++) + { + if(0 == mPtrViECodec->GetCodec(idx, video_codec)) + { + payloadName = video_codec.plName; + if(codecConfig->mName.compare(payloadName) == 0) + { + // Note: side-effect of this is that video_codec is filled in + // by GetCodec() + codecFound = true; + break; + } + } + }//for + } + + if(codecFound == false) + { + CSFLogError(logTag, "%s Codec Mismatch ", __FUNCTION__); + return kMediaConduitInvalidSendCodec; + } + // Note: only for overriding parameters from GetCodec()! + CodecConfigToWebRTCCodec(codecConfig, video_codec); + if (mSendingWidth != 0) { + // We're already in a call and are reconfiguring (perhaps due to + // ReplaceTrack). Set to match the last frame we sent. + + // We could also set mLastWidth to 0, to force immediate reconfig - + // more expensive, but perhaps less risk of missing something. Really + // on ReplaceTrack we should just call ConfigureCodecMode(), and if the + // mode changed, we re-configure. 
+ // Do this after CodecConfigToWebRTCCodec() to avoid messing up simulcast + video_codec.width = mSendingWidth; + video_codec.height = mSendingHeight; + video_codec.maxFramerate = mSendingFramerate; + } else { + mSendingWidth = 0; + mSendingHeight = 0; + mSendingFramerate = video_codec.maxFramerate; + } + + video_codec.mode = mCodecMode; + + if(mPtrViECodec->SetSendCodec(mChannel, video_codec) == -1) + { + error = mPtrViEBase->LastError(); + if(error == kViECodecInvalidCodec) + { + CSFLogError(logTag, "%s Invalid Send Codec", __FUNCTION__); + return kMediaConduitInvalidSendCodec; + } + CSFLogError(logTag, "%s SetSendCodec Failed %d ", __FUNCTION__, + mPtrViEBase->LastError()); + return kMediaConduitUnknownError; + } + + if (mMinBitrateEstimate != 0) { + mPtrViENetwork->SetBitrateConfig(mChannel, + mMinBitrateEstimate, + std::max(video_codec.startBitrate, + mMinBitrateEstimate), + std::max(video_codec.maxBitrate, + mMinBitrateEstimate)); + } + + if (!mVideoCodecStat) { + mVideoCodecStat = new VideoCodecStatistics(mChannel, mPtrViECodec); + } + mVideoCodecStat->Register(true); + + // See Bug 1297058, enabling FEC when NACK is set on H.264 is problematic + bool use_fec = codecConfig->RtcpFbFECIsSet(); + if ((mExternalSendCodec && codecConfig->mType == mExternalSendCodec->mType) + || codecConfig->mType == webrtc::kVideoCodecH264) { + if(codecConfig->RtcpFbNackIsSet("")) { + use_fec = false; + } + } + + if (use_fec) + { + uint8_t payload_type_red = INVALID_RTP_PAYLOAD; + uint8_t payload_type_ulpfec = INVALID_RTP_PAYLOAD; + if (!DetermineREDAndULPFECPayloadTypes(payload_type_red, payload_type_ulpfec)) { + CSFLogError(logTag, "%s Unable to set FEC status: could not determine" + "payload type: red %u ulpfec %u", + __FUNCTION__, payload_type_red, payload_type_ulpfec); + return kMediaConduitFECStatusError; + } + + if(codecConfig->RtcpFbNackIsSet("")) { + CSFLogDebug(logTag, "Enabling NACK/FEC (send) for video stream\n"); + if (mPtrRTP->SetHybridNACKFECStatus(mChannel, true, + payload_type_red, + payload_type_ulpfec) != 0) { + CSFLogError(logTag, "%s SetHybridNACKFECStatus Failed %d ", + __FUNCTION__, mPtrViEBase->LastError()); + return kMediaConduitHybridNACKFECStatusError; + } + } else { + CSFLogDebug(logTag, "Enabling FEC (send) for video stream\n"); + if (mPtrRTP->SetFECStatus(mChannel, true, + payload_type_red, payload_type_ulpfec) != 0) + { + CSFLogError(logTag, "%s SetFECStatus Failed %d ", __FUNCTION__, + mPtrViEBase->LastError()); + return kMediaConduitFECStatusError; + } + } + } else if(codecConfig->RtcpFbNackIsSet("")) { + CSFLogDebug(logTag, "Enabling NACK (send) for video stream\n"); + if (mPtrRTP->SetNACKStatus(mChannel, true) != 0) + { + CSFLogError(logTag, "%s NACKStatus Failed %d ", __FUNCTION__, + mPtrViEBase->LastError()); + return kMediaConduitNACKStatusError; + } + } + + { + MutexAutoLock lock(mCodecMutex); + + //Copy the applied config for future reference. 
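+    // (SelectSendResolution() later reads mCurSendCodecConfig under this same
+    // mCodecMutex when deciding whether to rescale outgoing frames.)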
+ mCurSendCodecConfig = new VideoCodecConfig(*codecConfig); + } + + bool remb_requested = codecConfig->RtcpFbRembIsSet(); + mPtrRTP->SetRembStatus(mChannel, true, remb_requested); + + return kMediaConduitNoError; +} + +MediaConduitErrorCode +WebrtcVideoConduit::ConfigureRecvMediaCodecs( + const std::vector<VideoCodecConfig* >& codecConfigList) +{ + CSFLogDebug(logTag, "%s ", __FUNCTION__); + MediaConduitErrorCode condError = kMediaConduitNoError; + bool success = false; + std::string payloadName; + + condError = StopReceiving(); + if (condError != kMediaConduitNoError) { + return condError; + } + + if(codecConfigList.empty()) + { + CSFLogError(logTag, "%s Zero number of codecs to configure", __FUNCTION__); + return kMediaConduitMalformedArgument; + } + + webrtc::ViEKeyFrameRequestMethod kf_request = webrtc::kViEKeyFrameRequestNone; + bool use_nack_basic = false; + bool use_tmmbr = false; + bool use_remb = false; + bool use_fec = false; + + //Try Applying the codecs in the list + // we treat as success if atleast one codec was applied and reception was + // started successfully. + for(std::vector<VideoCodecConfig*>::size_type i=0;i < codecConfigList.size();i++) + { + //if the codec param is invalid or diplicate, return error + if((condError = ValidateCodecConfig(codecConfigList[i],false)) != kMediaConduitNoError) + { + return condError; + } + + // Check for the keyframe request type: PLI is preferred + // over FIR, and FIR is preferred over none. + if (codecConfigList[i]->RtcpFbNackIsSet("pli")) + { + kf_request = webrtc::kViEKeyFrameRequestPliRtcp; + } else if(kf_request == webrtc::kViEKeyFrameRequestNone && + codecConfigList[i]->RtcpFbCcmIsSet("fir")) + { + kf_request = webrtc::kViEKeyFrameRequestFirRtcp; + } + + // Check whether NACK is requested + if(codecConfigList[i]->RtcpFbNackIsSet("")) + { + use_nack_basic = true; + } + + // Check whether TMMBR is requested + if (codecConfigList[i]->RtcpFbCcmIsSet("tmmbr")) { + use_tmmbr = true; + } + + // Check whether REMB is requested + if (codecConfigList[i]->RtcpFbRembIsSet()) { + use_remb = true; + } + + // Check whether FEC is requested + if (codecConfigList[i]->RtcpFbFECIsSet()) { + use_fec = true; + } + + webrtc::VideoCodec video_codec; + + memset(&video_codec, 0, sizeof(webrtc::VideoCodec)); + + if (mExternalRecvCodec && + codecConfigList[i]->mType == mExternalRecvCodec->mType) { + CSFLogError(logTag, "%s Configuring External H264 Receive Codec", __FUNCTION__); + + // XXX Do we need a separate setting for receive maxbitrate? Is it + // different for hardware codecs? For now assume symmetry. + CodecConfigToWebRTCCodec(codecConfigList[i], video_codec); + + // values SetReceiveCodec() cares about are name, type, maxbitrate + if(mPtrViECodec->SetReceiveCodec(mChannel,video_codec) == -1) + { + CSFLogError(logTag, "%s Invalid Receive Codec %d ", __FUNCTION__, + mPtrViEBase->LastError()); + } else { + CSFLogError(logTag, "%s Successfully Set the codec %s", __FUNCTION__, + codecConfigList[i]->mName.c_str()); + success = true; + } + } else { + //Retrieve pre-populated codec structure for our codec. 
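+      // e.g. for a config named "VP8", the loop below walks the engine's
+      // built-in codec table, copies the matching webrtc::VideoCodec template,
+      // and lets CodecConfigToWebRTCCodec() override the negotiated fields.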
+ for(int idx=0; idx < mPtrViECodec->NumberOfCodecs(); idx++) + { + if(mPtrViECodec->GetCodec(idx, video_codec) == 0) + { + payloadName = video_codec.plName; + if(codecConfigList[i]->mName.compare(payloadName) == 0) + { + CodecConfigToWebRTCCodec(codecConfigList[i], video_codec); + if(mPtrViECodec->SetReceiveCodec(mChannel,video_codec) == -1) + { + CSFLogError(logTag, "%s Invalid Receive Codec %d ", __FUNCTION__, + mPtrViEBase->LastError()); + } else { + CSFLogError(logTag, "%s Successfully Set the codec %s", __FUNCTION__, + codecConfigList[i]->mName.c_str()); + success = true; + } + break; //we found a match + } + } + }//end for codeclist + } + }//end for + + if(!success) + { + CSFLogError(logTag, "%s Setting Receive Codec Failed ", __FUNCTION__); + return kMediaConduitInvalidReceiveCodec; + } + + if (!mVideoCodecStat) { + mVideoCodecStat = new VideoCodecStatistics(mChannel, mPtrViECodec); + } + mVideoCodecStat->Register(false); + + // XXX Currently, we gather up all of the feedback types that the remote + // party indicated it supports for all video codecs and configure the entire + // conduit based on those capabilities. This is technically out of spec, + // as these values should be configured on a per-codec basis. However, + // the video engine only provides this API on a per-conduit basis, so that's + // how we have to do it. The approach of considering the remote capablities + // for the entire conduit to be a union of all remote codec capabilities + // (rather than the more conservative approach of using an intersection) + // is made to provide as many feedback mechanisms as are likely to be + // processed by the remote party (and should be relatively safe, since the + // remote party is required to ignore feedback types that it does not + // understand). + // + // Note that our configuration uses this union of remote capabilites as + // input to the configuration. It is not isomorphic to the configuration. + // For example, it only makes sense to have one frame request mechanism + // active at a time; so, if the remote party indicates more than one + // supported mechanism, we're only configuring the one we most prefer. + // + // See http://code.google.com/p/webrtc/issues/detail?id=2331 + + if (kf_request != webrtc::kViEKeyFrameRequestNone) + { + CSFLogDebug(logTag, "Enabling %s frame requests for video stream\n", + (kf_request == webrtc::kViEKeyFrameRequestPliRtcp ? 
+ "PLI" : "FIR")); + if(mPtrRTP->SetKeyFrameRequestMethod(mChannel, kf_request) != 0) + { + CSFLogError(logTag, "%s KeyFrameRequest Failed %d ", __FUNCTION__, + mPtrViEBase->LastError()); + return kMediaConduitKeyFrameRequestError; + } + } + + switch (kf_request) { + case webrtc::kViEKeyFrameRequestNone: + mFrameRequestMethod = FrameRequestNone; + break; + case webrtc::kViEKeyFrameRequestPliRtcp: + mFrameRequestMethod = FrameRequestPli; + break; + case webrtc::kViEKeyFrameRequestFirRtcp: + mFrameRequestMethod = FrameRequestFir; + break; + default: + MOZ_ASSERT(false); + mFrameRequestMethod = FrameRequestUnknown; + } + + if (use_fec) + { + uint8_t payload_type_red = INVALID_RTP_PAYLOAD; + uint8_t payload_type_ulpfec = INVALID_RTP_PAYLOAD; + if (!DetermineREDAndULPFECPayloadTypes(payload_type_red, payload_type_ulpfec)) { + CSFLogError(logTag, "%s Unable to set FEC status: could not determine" + "payload type: red %u ulpfec %u", + __FUNCTION__, payload_type_red, payload_type_ulpfec); + return kMediaConduitFECStatusError; + } + + // We also need to call SetReceiveCodec for RED and ULPFEC codecs + for(int idx=0; idx < mPtrViECodec->NumberOfCodecs(); idx++) { + webrtc::VideoCodec video_codec; + if(mPtrViECodec->GetCodec(idx, video_codec) == 0) { + payloadName = video_codec.plName; + if(video_codec.codecType == webrtc::VideoCodecType::kVideoCodecRED || + video_codec.codecType == webrtc::VideoCodecType::kVideoCodecULPFEC) { + if(mPtrViECodec->SetReceiveCodec(mChannel,video_codec) == -1) { + CSFLogError(logTag, "%s Invalid Receive Codec %d ", __FUNCTION__, + mPtrViEBase->LastError()); + } else { + CSFLogDebug(logTag, "%s Successfully Set the codec %s", __FUNCTION__, + video_codec.plName); + } + } + } + } + + if (use_nack_basic) { + CSFLogDebug(logTag, "Enabling NACK/FEC (recv) for video stream\n"); + if (mPtrRTP->SetHybridNACKFECStatus(mChannel, true, + payload_type_red, + payload_type_ulpfec) != 0) { + CSFLogError(logTag, "%s SetHybridNACKFECStatus Failed %d ", + __FUNCTION__, mPtrViEBase->LastError()); + return kMediaConduitNACKStatusError; + } + } else { + CSFLogDebug(logTag, "Enabling FEC (recv) for video stream\n"); + if (mPtrRTP->SetFECStatus(mChannel, true, + payload_type_red, payload_type_ulpfec) != 0) + { + CSFLogError(logTag, "%s SetFECStatus Failed %d ", __FUNCTION__, + mPtrViEBase->LastError()); + return kMediaConduitNACKStatusError; + } + } + } else if(use_nack_basic) { + CSFLogDebug(logTag, "Enabling NACK (recv) for video stream\n"); + if (mPtrRTP->SetNACKStatus(mChannel, true) != 0) + { + CSFLogError(logTag, "%s NACKStatus Failed %d ", __FUNCTION__, + mPtrViEBase->LastError()); + return kMediaConduitNACKStatusError; + } + } + mUsingNackBasic = use_nack_basic; + mUsingFEC = use_fec; + + if (use_tmmbr) { + CSFLogDebug(logTag, "Enabling TMMBR for video stream"); + if (mPtrRTP->SetTMMBRStatus(mChannel, true) != 0) { + CSFLogError(logTag, "%s SetTMMBRStatus Failed %d ", __FUNCTION__, + mPtrViEBase->LastError()); + return kMediaConduitTMMBRStatusError; + } + } + mUsingTmmbr = use_tmmbr; + + condError = StartReceiving(); + if (condError != kMediaConduitNoError) { + return condError; + } + + // by now we should be successfully started the reception + CSFLogDebug(logTag, "REMB enabled for video stream %s", + (use_remb ? "yes" : "no")); + mPtrRTP->SetRembStatus(mChannel, use_remb, true); + return kMediaConduitNoError; +} + +template<typename T> +T MinIgnoreZero(const T& a, const T& b) +{ + return std::min(a? a:b, b? 
b:a); +} + +struct ResolutionAndBitrateLimits { + uint32_t resolution_in_mb; + uint16_t min_bitrate; + uint16_t start_bitrate; + uint16_t max_bitrate; +}; + +#define MB_OF(w,h) ((unsigned int)((((w+15)>>4))*((unsigned int)((h+15)>>4)))) + +// For now, try to set the max rates well above the knee in the curve. +// Chosen somewhat arbitrarily; it's hard to find good data oriented for +// realtime interactive/talking-head recording. These rates assume +// 30fps. + +// XXX Populate this based on a pref (which we should consider sorting because +// people won't assume they need to). +static ResolutionAndBitrateLimits kResolutionAndBitrateLimits[] = { + {MB_OF(1920, 1200), 1500, 2000, 10000}, // >HD (3K, 4K, etc) + {MB_OF(1280, 720), 1200, 1500, 5000}, // HD ~1080-1200 + {MB_OF(800, 480), 600, 800, 2500}, // HD ~720 + {tl::Max<MB_OF(400, 240), MB_OF(352, 288)>::value, 200, 300, 1300}, // VGA, WVGA + {MB_OF(176, 144), 100, 150, 500}, // WQVGA, CIF + {0 , 40, 80, 250} // QCIF and below +}; + +void +WebrtcVideoConduit::SelectBitrates(unsigned short width, + unsigned short height, + unsigned int cap, + mozilla::Atomic<int32_t, mozilla::Relaxed>& aLastFramerateTenths, + unsigned int& out_min, + unsigned int& out_start, + unsigned int& out_max) +{ + // max bandwidth should be proportional (not linearly!) to resolution, and + // proportional (perhaps linearly, or close) to current frame rate. + unsigned int fs = MB_OF(width, height); + + for (ResolutionAndBitrateLimits resAndLimits : kResolutionAndBitrateLimits) { + if (fs > resAndLimits.resolution_in_mb && + // pick the highest range where at least start rate is within cap + // (or if we're at the end of the array). + (!cap || resAndLimits.start_bitrate <= cap || + resAndLimits.resolution_in_mb == 0)) { + out_min = MinIgnoreZero((unsigned int)resAndLimits.min_bitrate, cap); + out_start = MinIgnoreZero((unsigned int)resAndLimits.start_bitrate, cap); + out_max = MinIgnoreZero((unsigned int)resAndLimits.max_bitrate, cap); + break; + } + } + + // mLastFramerateTenths is an atomic, and scaled by *10 + double framerate = std::min((aLastFramerateTenths/10.),60.0); + MOZ_ASSERT(framerate > 0); + // Now linear reduction/increase based on fps (max 60fps i.e. doubling) + if (framerate >= 10) { + out_min = out_min * (framerate/30); + out_start = out_start * (framerate/30); + out_max = std::max((unsigned int)(out_max * (framerate/30)), cap); + } else { + // At low framerates, don't reduce bandwidth as much - cut slope to 1/2. + // Mostly this would be ultra-low-light situations/mobile or screensharing. + out_min = out_min * ((10-(framerate/2))/30); + out_start = out_start * ((10-(framerate/2))/30); + out_max = std::max((unsigned int)(out_max * ((10-(framerate/2))/30)), cap); + } + + if (mMinBitrate && mMinBitrate > out_min) { + out_min = mMinBitrate; + } + // If we try to set a minimum bitrate that is too low, ViE will reject it. + out_min = std::max((unsigned int) webrtc::kViEMinCodecBitrate, + out_min); + if (mStartBitrate && mStartBitrate > out_start) { + out_start = mStartBitrate; + } + out_start = std::max(out_start, out_min); + + // Note: mMaxBitrate is the max transport bitrate - it applies to a + // single codec encoding, but should also apply to the sum of all + // simulcast layers in this encoding! 
+  // So sum(layers.maxBitrate) <= mMaxBitrate
+  if (mMaxBitrate && mMaxBitrate > out_max) {
+    out_max = mMaxBitrate;
+  }
+}
+
+static void ConstrainPreservingAspectRatioExact(uint32_t max_fs,
+                                                unsigned short* width,
+                                                unsigned short* height)
+{
+  // We could try to pick a better starting divisor, but it won't make any real
+  // performance difference.
+  for (size_t d = 1; d < std::min(*width, *height); ++d) {
+    if ((*width % d) || (*height % d)) {
+      continue; // Not divisible
+    }
+
+    if (((*width) * (*height))/(d*d) <= max_fs) {
+      *width /= d;
+      *height /= d;
+      return;
+    }
+  }
+
+  *width = 0;
+  *height = 0;
+}
+
+static void ConstrainPreservingAspectRatio(uint16_t max_width,
+                                           uint16_t max_height,
+                                           unsigned short* width,
+                                           unsigned short* height)
+{
+  if (((*width) <= max_width) && ((*height) <= max_height)) {
+    return;
+  }
+
+  if ((*width) * max_height > max_width * (*height))
+  {
+    (*height) = max_width * (*height) / (*width);
+    (*width) = max_width;
+  }
+  else
+  {
+    (*width) = max_height * (*width) / (*height);
+    (*height) = max_height;
+  }
+}
+
+// XXX we need to figure out how to feed back changes in preferred capture
+// resolution to the getUserMedia source.
+// Returns true if we've submitted an async change (and took ownership
+// of *frame's data)
+bool
+WebrtcVideoConduit::SelectSendResolution(unsigned short width,
+                                         unsigned short height,
+                                         webrtc::I420VideoFrame *frame) // may be null
+{
+  mCodecMutex.AssertCurrentThreadOwns();
+  // XXX This will do bandwidth-resolution adaptation as well - bug 877954
+
+  mLastWidth = width;
+  mLastHeight = height;
+  // Enforce constraints
+  if (mCurSendCodecConfig) {
+    uint16_t max_width = mCurSendCodecConfig->mEncodingConstraints.maxWidth;
+    uint16_t max_height = mCurSendCodecConfig->mEncodingConstraints.maxHeight;
+    if (max_width || max_height) {
+      max_width = max_width ? max_width : UINT16_MAX;
+      max_height = max_height ? max_height : UINT16_MAX;
+      ConstrainPreservingAspectRatio(max_width, max_height, &width, &height);
+    }
+
+    // Limit resolution to max-fs while keeping same aspect ratio as the
+    // incoming image.
+    if (mCurSendCodecConfig->mEncodingConstraints.maxFs)
+    {
+      uint32_t max_fs = mCurSendCodecConfig->mEncodingConstraints.maxFs;
+      unsigned int cur_fs, mb_width, mb_height, mb_max;
+
+      // Could we make this simpler by picking the larger of width and height,
+      // calculating a max for just that value based on the scale parameter,
+      // and then let ConstrainPreservingAspectRatio do the rest?
+      mb_width = (width + 15) >> 4;
+      mb_height = (height + 15) >> 4;
+
+      cur_fs = mb_width * mb_height;
+
+      // Limit resolution to max_fs, but don't scale up.
+      if (cur_fs > max_fs)
+      {
+        double scale_ratio;
+
+        scale_ratio = sqrt((double) max_fs / (double) cur_fs);
+
+        mb_width = mb_width * scale_ratio;
+        mb_height = mb_height * scale_ratio;
+
+        // Adjust mb_width and mb_height if they were truncated to zero.
+        if (mb_width == 0) {
+          mb_width = 1;
+          mb_height = std::min(mb_height, max_fs);
+        }
+        if (mb_height == 0) {
+          mb_height = 1;
+          mb_width = std::min(mb_width, max_fs);
+        }
+      }
+
+      // Limit width/height separately to limit effect of extreme aspect ratios.
+      mb_max = (unsigned) sqrt(8 * (double) max_fs);
+
+      max_width = 16 * std::min(mb_width, mb_max);
+      max_height = 16 * std::min(mb_height, mb_max);
+      ConstrainPreservingAspectRatio(max_width, max_height, &width, &height);
+    }
+  }
+
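+  // Worked example of the max-fs clamp above (illustrative numbers only, not
+  // from this code): a 1280x720 input is 80x45 macroblocks, so cur_fs = 3600.
+  // With max_fs = 900, scale_ratio = sqrt(900/3600) = 0.5, giving 40x22 MBs,
+  // i.e. width/height limits of 640x352 for ConstrainPreservingAspectRatio().
+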
+  // Adapt to getUserMedia resolution changes:
+  // check if we need to reconfigure the sending resolution.
+  bool changed = false;
+  if (mSendingWidth != width || mSendingHeight != height)
+  {
+    CSFLogDebug(logTag, "%s: resolution changing to %ux%u (from %ux%u)",
+                __FUNCTION__, width, height, mSendingWidth, mSendingHeight);
+    // This will avoid us continually retrying this operation if it fails.
+    // If the resolution changes, we'll try again. In the meantime, we'll
+    // keep using the old size in the encoder.
+    mSendingWidth = width;
+    mSendingHeight = height;
+    changed = true;
+  }
+
+  // uses mSendingWidth/Height
+  unsigned int framerate = SelectSendFrameRate(mSendingFramerate);
+  if (mSendingFramerate != framerate) {
+    CSFLogDebug(logTag, "%s: framerate changing to %u (from %u)",
+                __FUNCTION__, framerate, mSendingFramerate);
+    mSendingFramerate = framerate;
+    changed = true;
+  }
+
+  if (changed) {
+    // On a resolution change, bounce this to the correct thread to
+    // re-configure (same as used for Init()). Do *not* block the calling
+    // thread since that may be the MSG thread.
+
+    // MUST run on the same thread as Init()/etc
+    if (!NS_IsMainThread()) {
+      // Note: on *initial* config (first frame), best would be to drop
+      // frames until the config is done, then encode the most recent frame
+      // provided and continue from there. We don't do this, but we do drop
+      // all frames while in the process of a reconfig and then encode the
+      // frame that started the reconfig, which is close. There may be a
+      // barely perceptible glitch in the video due to the dropped frame(s).
+      mInReconfig = true;
+
+      // We can't pass a UniquePtr<> or unique_ptr<> to a lambda directly
+      webrtc::I420VideoFrame *new_frame = nullptr;
+      if (frame) {
+        new_frame = new webrtc::I420VideoFrame();
+        // the internal buffer pointer is refcounted, so we don't have 2 copies here
+        new_frame->ShallowCopy(*frame);
+      }
+      RefPtr<WebrtcVideoConduit> self(this);
+      RefPtr<Runnable> webrtc_runnable =
+        media::NewRunnableFrom([self, width, height, new_frame]() -> nsresult {
+            UniquePtr<webrtc::I420VideoFrame> local_frame(new_frame); // Simplify cleanup
+
+            MutexAutoLock lock(self->mCodecMutex);
+            return self->ReconfigureSendCodec(width, height, new_frame);
+          });
+      // new_frame now owned by lambda
+      CSFLogDebug(logTag, "%s: proxying lambda to main thread for reconfig (width %u/%u, height %u/%u)",
+                  __FUNCTION__, width, mLastWidth, height, mLastHeight);
+      NS_DispatchToMainThread(webrtc_runnable.forget());
+      if (new_frame) {
+        return true; // queued it
+      }
+    } else {
+      // already on the right thread
+      ReconfigureSendCodec(width, height, frame);
+    }
+  }
+  return false;
+}
+
+nsresult
+WebrtcVideoConduit::ReconfigureSendCodec(unsigned short width,
+                                         unsigned short height,
+                                         webrtc::I420VideoFrame *frame)
+{
+  mCodecMutex.AssertCurrentThreadOwns();
+
+  // Get current vie codec.
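+  // In outline, the reconfig below is a get-modify-set cycle:
+  //   GetSendCodec(mChannel, vie_codec);   // read the engine's active config
+  //   ...mutate width/height/framerate/bitrates/simulcast streams...
+  //   SetSendCodec(mChannel, vie_codec);   // apply the whole struct back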
+ webrtc::VideoCodec vie_codec; + int32_t err; + + mInReconfig = false; + if ((err = mPtrViECodec->GetSendCodec(mChannel, vie_codec)) != 0) + { + CSFLogError(logTag, "%s: GetSendCodec failed, err %d", __FUNCTION__, err); + return NS_ERROR_FAILURE; + } + + CSFLogDebug(logTag, + "%s: Requesting resolution change to %ux%u (from %ux%u)", + __FUNCTION__, width, height, vie_codec.width, vie_codec.height); + + if (mRtpStreamIdEnabled) { + vie_codec.ridId = mRtpStreamIdExtId; + } + + vie_codec.width = width; + vie_codec.height = height; + vie_codec.maxFramerate = mSendingFramerate; + SelectBitrates(vie_codec.width, vie_codec.height, 0, + mLastFramerateTenths, + vie_codec.minBitrate, + vie_codec.startBitrate, + vie_codec.maxBitrate); + + // These are based on lowest-fidelity, because if there is insufficient + // bandwidth for all streams, only the lowest fidelity one will be sent. + uint32_t minMinBitrate = 0; + uint32_t minStartBitrate = 0; + // Total for all simulcast streams. + uint32_t totalMaxBitrate = 0; + + for (size_t i = vie_codec.numberOfSimulcastStreams; i > 0; --i) { + webrtc::SimulcastStream& stream(vie_codec.simulcastStream[i - 1]); + stream.width = width; + stream.height = height; + MOZ_ASSERT(stream.jsScaleDownBy >= 1.0); + uint32_t new_width = uint32_t(width / stream.jsScaleDownBy); + uint32_t new_height = uint32_t(height / stream.jsScaleDownBy); + // TODO: If two layers are similar, only alloc bits to one (Bug 1249859) + if (new_width != width || new_height != height) { + if (vie_codec.numberOfSimulcastStreams == 1) { + // Use less strict scaling in unicast. That way 320x240 / 3 = 106x79. + ConstrainPreservingAspectRatio(new_width, new_height, + &stream.width, &stream.height); + } else { + // webrtc.org supposedly won't tolerate simulcast unless every stream + // is exactly the same aspect ratio. 320x240 / 3 = 80x60. + ConstrainPreservingAspectRatioExact(new_width*new_height, + &stream.width, &stream.height); + } + } + // Give each layer default appropriate bandwidth limits based on the + // resolution/framerate of that layer + SelectBitrates(stream.width, stream.height, + MinIgnoreZero(stream.jsMaxBitrate, vie_codec.maxBitrate), + mLastFramerateTenths, + stream.minBitrate, + stream.targetBitrate, + stream.maxBitrate); + + // webrtc.org expects the last, highest fidelity, simulcast stream to + // always have the same resolution as vie_codec + // Also set the least user-constrained of the stream bitrates on vie_codec. 
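+    // Worked example of the aggregation below (assumed layer values, not
+    // taken from this code): streams with (min, target, max) kbps of
+    // (100, 150, 500), (200, 300, 1300) and (600, 800, 2500) yield
+    // minMinBitrate = 100, minStartBitrate = 150 and totalMaxBitrate = 4300,
+    // which then bound vie_codec's own min/start/max after the loop.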
+ if (i == vie_codec.numberOfSimulcastStreams) { + vie_codec.width = stream.width; + vie_codec.height = stream.height; + } + minMinBitrate = MinIgnoreZero(stream.minBitrate, minMinBitrate); + minStartBitrate = MinIgnoreZero(stream.targetBitrate, minStartBitrate); + totalMaxBitrate += stream.maxBitrate; + } + if (vie_codec.numberOfSimulcastStreams != 0) { + vie_codec.minBitrate = std::max(minMinBitrate, vie_codec.minBitrate); + vie_codec.maxBitrate = std::min(totalMaxBitrate, vie_codec.maxBitrate); + vie_codec.startBitrate = std::max(vie_codec.minBitrate, + std::min(minStartBitrate, + vie_codec.maxBitrate)); + } + vie_codec.mode = mCodecMode; + if ((err = mPtrViECodec->SetSendCodec(mChannel, vie_codec)) != 0) + { + CSFLogError(logTag, "%s: SetSendCodec(%ux%u) failed, err %d", + __FUNCTION__, width, height, err); + return NS_ERROR_FAILURE; + } + if (mMinBitrateEstimate != 0) { + mPtrViENetwork->SetBitrateConfig(mChannel, + mMinBitrateEstimate, + std::max(vie_codec.startBitrate, + mMinBitrateEstimate), + std::max(vie_codec.maxBitrate, + mMinBitrateEstimate)); + } + + CSFLogDebug(logTag, "%s: Encoder resolution changed to %ux%u @ %ufps, bitrate %u:%u", + __FUNCTION__, width, height, mSendingFramerate, + vie_codec.minBitrate, vie_codec.maxBitrate); + if (frame) { + // XXX I really don't like doing this from MainThread... + mPtrExtCapture->IncomingFrame(*frame); + mVideoCodecStat->SentFrame(); + CSFLogDebug(logTag, "%s Inserted a frame from reconfig lambda", __FUNCTION__); + } + return NS_OK; +} + +// Invoked under lock of mCodecMutex! +unsigned int +WebrtcVideoConduit::SelectSendFrameRate(unsigned int framerate) const +{ + mCodecMutex.AssertCurrentThreadOwns(); + unsigned int new_framerate = framerate; + + // Limit frame rate based on max-mbps + if (mCurSendCodecConfig && mCurSendCodecConfig->mEncodingConstraints.maxMbps) + { + unsigned int cur_fs, mb_width, mb_height, max_fps; + + mb_width = (mSendingWidth + 15) >> 4; + mb_height = (mSendingHeight + 15) >> 4; + + cur_fs = mb_width * mb_height; + if (cur_fs > 0) { // in case no frames have been sent + max_fps = mCurSendCodecConfig->mEncodingConstraints.maxMbps/cur_fs; + if (max_fps < mSendingFramerate) { + new_framerate = max_fps; + } + + if (mCurSendCodecConfig->mEncodingConstraints.maxFps != 0 && + mCurSendCodecConfig->mEncodingConstraints.maxFps < mSendingFramerate) { + new_framerate = mCurSendCodecConfig->mEncodingConstraints.maxFps; + } + } + } + return new_framerate; +} + +MediaConduitErrorCode +WebrtcVideoConduit::SetExternalSendCodec(VideoCodecConfig* config, + VideoEncoder* encoder) { + NS_ASSERTION(NS_IsMainThread(), "Only call on main thread"); + if (!mPtrExtCodec->RegisterExternalSendCodec(mChannel, + config->mType, + static_cast<WebrtcVideoEncoder*>(encoder), + false)) { + mExternalSendCodecHandle = encoder; + mExternalSendCodec = new VideoCodecConfig(*config); + return kMediaConduitNoError; + } + return kMediaConduitInvalidSendCodec; +} + +MediaConduitErrorCode +WebrtcVideoConduit::SetExternalRecvCodec(VideoCodecConfig* config, + VideoDecoder* decoder) { + NS_ASSERTION(NS_IsMainThread(), "Only call on main thread"); + if (!mPtrExtCodec->RegisterExternalReceiveCodec(mChannel, + config->mType, + static_cast<WebrtcVideoDecoder*>(decoder))) { + mExternalRecvCodecHandle = decoder; + mExternalRecvCodec = new VideoCodecConfig(*config); + return kMediaConduitNoError; + } + return kMediaConduitInvalidReceiveCodec; +} + +MediaConduitErrorCode +WebrtcVideoConduit::EnableRTPStreamIdExtension(bool enabled, uint8_t id) { + mRtpStreamIdEnabled = 
enabled;
+  mRtpStreamIdExtId = id;
+  return kMediaConduitNoError;
+}
+
+MediaConduitErrorCode
+WebrtcVideoConduit::SendVideoFrame(unsigned char* video_frame,
+                                   unsigned int video_frame_length,
+                                   unsigned short width,
+                                   unsigned short height,
+                                   VideoType video_type,
+                                   uint64_t capture_time)
+{
+
+  // Sanity-check the parameters
+  if(!video_frame || video_frame_length == 0 ||
+     width == 0 || height == 0)
+  {
+    CSFLogError(logTag, "%s Invalid Parameters ",__FUNCTION__);
+    MOZ_ASSERT(false);
+    return kMediaConduitMalformedArgument;
+  }
+  MOZ_ASSERT(video_type == VideoType::kVideoI420);
+  MOZ_ASSERT(mPtrExtCapture);
+
+  // Transmission should be enabled before we insert any frames.
+  if(!mEngineTransmitting)
+  {
+    CSFLogError(logTag, "%s Engine not transmitting ", __FUNCTION__);
+    return kMediaConduitSessionNotInited;
+  }
+
+  // insert the frame into the video engine in I420 format only
+  webrtc::I420VideoFrame i420_frame;
+  i420_frame.CreateFrame(video_frame, width, height, webrtc::kVideoRotation_0);
+  i420_frame.set_timestamp(capture_time);
+  i420_frame.set_render_time_ms(capture_time);
+
+  return SendVideoFrame(i420_frame);
+}
+
+MediaConduitErrorCode
+WebrtcVideoConduit::SendVideoFrame(webrtc::I420VideoFrame& frame)
+{
+  CSFLogDebug(logTag, "%s ", __FUNCTION__);
+  // See if we need to recalculate what we're sending.
+  // Don't compare mSendingWidth/Height, since those may not be the same as the input.
+  {
+    MutexAutoLock lock(mCodecMutex);
+    if (mInReconfig) {
+      // Waiting for it to finish
+      return kMediaConduitNoError;
+    }
+    if (frame.width() != mLastWidth || frame.height() != mLastHeight) {
+      CSFLogDebug(logTag, "%s: call SelectSendResolution with %ux%u",
+                  __FUNCTION__, frame.width(), frame.height());
+      if (SelectSendResolution(frame.width(), frame.height(), &frame)) {
+        // SelectSendResolution took ownership of the data in frame.
+        // Submit the frame after reconfig is done
+        return kMediaConduitNoError;
+      }
+    }
+  }
+  mPtrExtCapture->IncomingFrame(frame);
+
+  mVideoCodecStat->SentFrame();
+  CSFLogDebug(logTag, "%s Inserted a frame", __FUNCTION__);
+  return kMediaConduitNoError;
+}
+
+// Transport Layer Callbacks
+MediaConduitErrorCode
+WebrtcVideoConduit::ReceivedRTPPacket(const void *data, int len)
+{
+  CSFLogDebug(logTag, "%s: seq# %u, Channel %d, Len %d ", __FUNCTION__,
+              (uint16_t) ntohs(((uint16_t*) data)[1]), mChannel, len);
+
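+  // (Standard RTP header layout per RFC 3550, noted here for reference: the
+  // first two bytes hold V/P/X/CC and M/PT, so the big-endian sequence
+  // number sits in bytes 2-3; hence ((uint16_t*) data)[1] plus ntohs()
+  // in the log statement above.)
+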
+  // Media Engine should be receiving already.
+  if(mEngineReceiving)
+  {
+    // let the engine know of an RTP packet to decode
+    // XXX we need to get passed the time the packet was received
+    if(mPtrViENetwork->ReceivedRTPPacket(mChannel, data, len, webrtc::PacketTime()) == -1)
+    {
+      int error = mPtrViEBase->LastError();
+      CSFLogError(logTag, "%s RTP Processing Failed %d ", __FUNCTION__, error);
+      if(error >= kViERtpRtcpInvalidChannelId && error <= kViERtpRtcpRtcpDisabled)
+      {
+        return kMediaConduitRTPProcessingFailed;
+      }
+      return kMediaConduitRTPRTCPModuleError;
+    }
+  } else {
+    CSFLogError(logTag, "Error: %s when not receiving", __FUNCTION__);
+    return kMediaConduitSessionNotInited;
+  }
+
+  return kMediaConduitNoError;
+}
+
+MediaConduitErrorCode
+WebrtcVideoConduit::ReceivedRTCPPacket(const void *data, int len)
+{
+  CSFLogDebug(logTag, " %s Channel %d, Len %d ", __FUNCTION__, mChannel, len);
+
+  //Media Engine should be receiving already
+  if(mPtrViENetwork->ReceivedRTCPPacket(mChannel,data,len) == -1)
+  {
+    int error = mPtrViEBase->LastError();
+    CSFLogError(logTag, "%s RTCP Processing Failed %d", __FUNCTION__, error);
+    if(error >= kViERtpRtcpInvalidChannelId && error <= kViERtpRtcpRtcpDisabled)
+    {
+      return kMediaConduitRTPProcessingFailed;
+    }
+    return kMediaConduitRTPRTCPModuleError;
+  }
+  return kMediaConduitNoError;
+}
+
+MediaConduitErrorCode
+WebrtcVideoConduit::StopTransmitting()
+{
+  if(mEngineTransmitting)
+  {
+    CSFLogDebug(logTag, "%s Engine Already Sending. Attempting to Stop ", __FUNCTION__);
+    if(mPtrViEBase->StopSend(mChannel) == -1)
+    {
+      CSFLogError(logTag, "%s StopSend() Failed %d ",__FUNCTION__,
+                  mPtrViEBase->LastError());
+      return kMediaConduitUnknownError;
+    }
+
+    mEngineTransmitting = false;
+  }
+
+  return kMediaConduitNoError;
+}
+
+MediaConduitErrorCode
+WebrtcVideoConduit::StartTransmitting()
+{
+  if (!mEngineTransmitting) {
+    if(mPtrViEBase->StartSend(mChannel) == -1)
+    {
+      CSFLogError(logTag, "%s Start Send Error %d ", __FUNCTION__,
+                  mPtrViEBase->LastError());
+      return kMediaConduitUnknownError;
+    }
+
+    mEngineTransmitting = true;
+  }
+
+  return kMediaConduitNoError;
+}
+
+MediaConduitErrorCode
+WebrtcVideoConduit::StopReceiving()
+{
+  NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
+  // Are we receiving already? If so, stop receiving and playout
+  // since we can't apply a new recv codec while the engine is playing.
+  if(mEngineReceiving)
+  {
+    CSFLogDebug(logTag, "%s Engine Already Receiving. Attempting to Stop ", __FUNCTION__);
+    if(mPtrViEBase->StopReceive(mChannel) == -1)
+    {
+      int error = mPtrViEBase->LastError();
+      if(error == kViEBaseUnknownError)
+      {
+        CSFLogDebug(logTag, "%s StopReceive() Success ", __FUNCTION__);
+      } else {
+        CSFLogError(logTag, "%s StopReceive() Failed %d ", __FUNCTION__,
+                    mPtrViEBase->LastError());
+        return kMediaConduitUnknownError;
+      }
+    }
+    mEngineReceiving = false;
+  }
+
+  return kMediaConduitNoError;
+}
+
+MediaConduitErrorCode
+WebrtcVideoConduit::StartReceiving()
+{
+  if (!mEngineReceiving) {
+    CSFLogDebug(logTag, "%s Attempting to start...
", __FUNCTION__); + //Start Receive on the video engine + if(mPtrViEBase->StartReceive(mChannel) == -1) + { + int error = mPtrViEBase->LastError(); + CSFLogError(logTag, "%s Start Receive Error %d ", __FUNCTION__, error); + + return kMediaConduitUnknownError; + } + + mEngineReceiving = true; + } + + return kMediaConduitNoError; +} + +//WebRTC::RTP Callback Implementation +// Called on MSG thread +int WebrtcVideoConduit::SendPacket(int channel, const void* data, size_t len) +{ + CSFLogDebug(logTag, "%s : channel %d len %lu", __FUNCTION__, channel, (unsigned long) len); + + ReentrantMonitorAutoEnter enter(mTransportMonitor); + if(mTransmitterTransport && + (mTransmitterTransport->SendRtpPacket(data, len) == NS_OK)) + { + CSFLogDebug(logTag, "%s Sent RTP Packet ", __FUNCTION__); + return len; + } else { + CSFLogError(logTag, "%s RTP Packet Send Failed ", __FUNCTION__); + return -1; + } +} + +// Called from multiple threads including webrtc Process thread +int WebrtcVideoConduit::SendRTCPPacket(int channel, const void* data, size_t len) +{ + CSFLogDebug(logTag, "%s : channel %d , len %lu ", __FUNCTION__, channel, (unsigned long) len); + + // We come here if we have only one pipeline/conduit setup, + // such as for unidirectional streams. + // We also end up here if we are receiving + ReentrantMonitorAutoEnter enter(mTransportMonitor); + if(mReceiverTransport && + mReceiverTransport->SendRtcpPacket(data, len) == NS_OK) + { + // Might be a sender report, might be a receiver report, we don't know. + CSFLogDebug(logTag, "%s Sent RTCP Packet ", __FUNCTION__); + return len; + } else if(mTransmitterTransport && + (mTransmitterTransport->SendRtcpPacket(data, len) == NS_OK)) { + CSFLogDebug(logTag, "%s Sent RTCP Packet (sender report) ", __FUNCTION__); + return len; + } else { + CSFLogError(logTag, "%s RTCP Packet Send Failed ", __FUNCTION__); + return -1; + } +} + +// WebRTC::ExternalMedia Implementation +int +WebrtcVideoConduit::FrameSizeChange(unsigned int width, + unsigned int height, + unsigned int numStreams) +{ + CSFLogDebug(logTag, "%s ", __FUNCTION__); + + + ReentrantMonitorAutoEnter enter(mTransportMonitor); + mReceivingWidth = width; + mReceivingHeight = height; + mNumReceivingStreams = numStreams; + + if(mRenderer) + { + mRenderer->FrameSizeChange(width, height, numStreams); + return 0; + } + + CSFLogError(logTag, "%s Renderer is NULL ", __FUNCTION__); + return -1; +} + +int +WebrtcVideoConduit::DeliverFrame(unsigned char* buffer, + size_t buffer_size, + uint32_t time_stamp, + int64_t ntp_time_ms, + int64_t render_time, + void *handle) +{ + return DeliverFrame(buffer, buffer_size, mReceivingWidth, (mReceivingWidth+1)>>1, + time_stamp, ntp_time_ms, render_time, handle); +} + +int +WebrtcVideoConduit::DeliverFrame(unsigned char* buffer, + size_t buffer_size, + uint32_t y_stride, + uint32_t cbcr_stride, + uint32_t time_stamp, + int64_t ntp_time_ms, + int64_t render_time, + void *handle) +{ + CSFLogDebug(logTag, "%s Buffer Size %lu", __FUNCTION__, (unsigned long) buffer_size); + + ReentrantMonitorAutoEnter enter(mTransportMonitor); + if(mRenderer) + { + layers::Image* img = nullptr; + // |handle| should be a webrtc::NativeHandle if available. + if (handle) { + webrtc::NativeHandle* native_h = static_cast<webrtc::NativeHandle*>(handle); + // In the handle, there should be a layers::Image. 
+      img = static_cast<layers::Image*>(native_h->GetHandle());
+    }
+
+    if (mVideoLatencyTestEnable && mReceivingWidth && mReceivingHeight) {
+      uint64_t now = PR_Now();
+      uint64_t timestamp = 0;
+      bool ok = YuvStamper::Decode(mReceivingWidth, mReceivingHeight, mReceivingWidth,
+                                   buffer,
+                                   reinterpret_cast<unsigned char*>(&timestamp),
+                                   sizeof(timestamp), 0, 0);
+      if (ok) {
+        VideoLatencyUpdate(now - timestamp);
+      }
+    }
+
+    const ImageHandle img_h(img);
+    mRenderer->RenderVideoFrame(buffer, buffer_size, y_stride, cbcr_stride,
+                                time_stamp, render_time, img_h);
+    return 0;
+  }
+
+  CSFLogError(logTag, "%s Renderer is NULL ", __FUNCTION__);
+  return -1;
+}
+
+int
+WebrtcVideoConduit::DeliverI420Frame(const webrtc::I420VideoFrame& webrtc_frame)
+{
+  if (!webrtc_frame.native_handle()) {
+    uint32_t y_stride = webrtc_frame.stride(static_cast<webrtc::PlaneType>(0));
+    return DeliverFrame(const_cast<uint8_t*>(webrtc_frame.buffer(webrtc::kYPlane)),
+                        CalcBufferSize(webrtc::kI420, y_stride, webrtc_frame.height()),
+                        y_stride,
+                        webrtc_frame.stride(static_cast<webrtc::PlaneType>(1)),
+                        webrtc_frame.timestamp(),
+                        webrtc_frame.ntp_time_ms(),
+                        webrtc_frame.render_time_ms(), nullptr);
+  }
+  size_t buffer_size = CalcBufferSize(webrtc::kI420, webrtc_frame.width(), webrtc_frame.height());
+  CSFLogDebug(logTag, "%s Buffer Size %lu", __FUNCTION__, (unsigned long) buffer_size);
+
+  ReentrantMonitorAutoEnter enter(mTransportMonitor);
+  if(mRenderer)
+  {
+    layers::Image* img = nullptr;
+    // |handle| should be a webrtc::NativeHandle if available.
+    webrtc::NativeHandle* native_h = static_cast<webrtc::NativeHandle*>(webrtc_frame.native_handle());
+    if (native_h) {
+      // In the handle, there should be a layers::Image.
+      img = static_cast<layers::Image*>(native_h->GetHandle());
+    }
+
+#if 0
+    //#ifndef MOZ_WEBRTC_OMX
+    // XXX - this may not be possible on GONK with textures!
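+    // (Kept disabled: in this texture path there is no raw |buffer| for
+    // YuvStamper to decode a latency stamp from.)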
+    if (mVideoLatencyTestEnable && mReceivingWidth && mReceivingHeight) {
+      uint64_t now = PR_Now();
+      uint64_t timestamp = 0;
+      bool ok = YuvStamper::Decode(mReceivingWidth, mReceivingHeight, mReceivingWidth,
+                                   buffer,
+                                   reinterpret_cast<unsigned char*>(&timestamp),
+                                   sizeof(timestamp), 0, 0);
+      if (ok) {
+        VideoLatencyUpdate(now - timestamp);
+      }
+    }
+#endif
+
+    const ImageHandle img_h(img);
+    mRenderer->RenderVideoFrame(nullptr, buffer_size, webrtc_frame.timestamp(),
+                                webrtc_frame.render_time_ms(), img_h);
+    return 0;
+  }
+
+  CSFLogError(logTag, "%s Renderer is NULL ", __FUNCTION__);
+  return -1;
+}
+
+/**
+ * Copy the codec passed into Conduit's database
+ */
+
+void
+WebrtcVideoConduit::CodecConfigToWebRTCCodec(const VideoCodecConfig* codecInfo,
+                                             webrtc::VideoCodec& cinst)
+{
+  // Note: this assumes cinst is initialized to a base state either by
+  // hand or from a config fetched with GetConfig(); this modifies the config
+  // to match parameters from VideoCodecConfig
+  cinst.plType = codecInfo->mType;
+  if (codecInfo->mName == "H264") {
+    cinst.codecType = webrtc::kVideoCodecH264;
+    PL_strncpyz(cinst.plName, "H264", sizeof(cinst.plName));
+  } else if (codecInfo->mName == "VP8") {
+    cinst.codecType = webrtc::kVideoCodecVP8;
+    PL_strncpyz(cinst.plName, "VP8", sizeof(cinst.plName));
+  } else if (codecInfo->mName == "VP9") {
+    cinst.codecType = webrtc::kVideoCodecVP9;
+    PL_strncpyz(cinst.plName, "VP9", sizeof(cinst.plName));
+  } else if (codecInfo->mName == "I420") {
+    cinst.codecType = webrtc::kVideoCodecI420;
+    PL_strncpyz(cinst.plName, "I420", sizeof(cinst.plName));
+  } else {
+    cinst.codecType = webrtc::kVideoCodecUnknown;
+    PL_strncpyz(cinst.plName, "Unknown", sizeof(cinst.plName));
+  }
+
+  // width/height will be overridden on the first frame; they must be 'sane' for
+  // SetSendCodec()
+  if (codecInfo->mEncodingConstraints.maxFps > 0) {
+    cinst.maxFramerate = codecInfo->mEncodingConstraints.maxFps;
+  } else {
+    cinst.maxFramerate = DEFAULT_VIDEO_MAX_FRAMERATE;
+  }
+
+  // Defaults if rates aren't forced by pref. Typically defaults are
+  // overridden on the first video frame.
+  cinst.minBitrate = mMinBitrate ? mMinBitrate : 200;
+  cinst.startBitrate = mStartBitrate ? mStartBitrate : 300;
+  cinst.targetBitrate = cinst.startBitrate;
+  cinst.maxBitrate = mMaxBitrate ? mMaxBitrate : 2000;
+
+  if (cinst.codecType == webrtc::kVideoCodecH264)
+  {
+#ifdef MOZ_WEBRTC_OMX
+    cinst.resolution_divisor = 16;
+#endif
+    // cinst.codecSpecific.H264.profile = ?
+    cinst.codecSpecific.H264.profile_byte = codecInfo->mProfile;
+    cinst.codecSpecific.H264.constraints = codecInfo->mConstraints;
+    cinst.codecSpecific.H264.level = codecInfo->mLevel;
+    cinst.codecSpecific.H264.packetizationMode = codecInfo->mPacketizationMode;
+    if (codecInfo->mEncodingConstraints.maxBr > 0) {
+      // webrtc.org uses kbps, we use bps
+      cinst.maxBitrate =
+        MinIgnoreZero(cinst.maxBitrate,
+                      codecInfo->mEncodingConstraints.maxBr)/1000;
+    }
+    if (codecInfo->mEncodingConstraints.maxMbps > 0) {
+      // Not supported yet!
+      CSFLogError(logTag, "%s H.264 max_mbps not supported yet ", __FUNCTION__);
+    }
+    // XXX parse the encoded SPS/PPS data
+    // paranoia
+    cinst.codecSpecific.H264.spsData = nullptr;
+    cinst.codecSpecific.H264.spsLen = 0;
+    cinst.codecSpecific.H264.ppsData = nullptr;
+    cinst.codecSpecific.H264.ppsLen = 0;
+  }
+  // Init mSimulcastEncodings always since they hold info from setParameters.
+  // TODO(bug 1210175): H264 doesn't support simulcast yet.
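+  // Ordering, illustrated with assumed rids (example values only): if JSEP
+  // produced encodings ["hi", "mid", "lo"] (highest fidelity first), the loop
+  // below stores them reversed, so simulcastStream[0] holds "lo" and
+  // simulcastStream[2] holds "hi", matching webrtc.org's lowest-to-highest
+  // convention.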
+ size_t numberOfSimulcastEncodings = std::min(codecInfo->mSimulcastEncodings.size(), (size_t)webrtc::kMaxSimulcastStreams); + for (size_t i = 0; i < numberOfSimulcastEncodings; ++i) { + const VideoCodecConfig::SimulcastEncoding& encoding = + codecInfo->mSimulcastEncodings[i]; + // Make sure the constraints on the whole stream are reflected. + webrtc::SimulcastStream stream; + memset(&stream, 0, sizeof(stream)); + stream.width = cinst.width; + stream.height = cinst.height; + stream.numberOfTemporalLayers = 1; + stream.maxBitrate = cinst.maxBitrate; + stream.targetBitrate = cinst.targetBitrate; + stream.minBitrate = cinst.minBitrate; + stream.qpMax = cinst.qpMax; + strncpy(stream.rid, encoding.rid.c_str(), sizeof(stream.rid)-1); + stream.rid[sizeof(stream.rid) - 1] = 0; + + // Apply encoding-specific constraints. + stream.width = MinIgnoreZero( + stream.width, + (unsigned short)encoding.constraints.maxWidth); + stream.height = MinIgnoreZero( + stream.height, + (unsigned short)encoding.constraints.maxHeight); + + // webrtc.org uses kbps, we use bps + stream.jsMaxBitrate = encoding.constraints.maxBr/1000; + stream.jsScaleDownBy = encoding.constraints.scaleDownBy; + + MOZ_ASSERT(stream.jsScaleDownBy >= 1.0); + uint32_t width = stream.width? stream.width : 640; + uint32_t height = stream.height? stream.height : 480; + uint32_t new_width = uint32_t(width / stream.jsScaleDownBy); + uint32_t new_height = uint32_t(height / stream.jsScaleDownBy); + + if (new_width != width || new_height != height) { + // Estimate. Overridden on first frame. + SelectBitrates(new_width, new_height, stream.jsMaxBitrate, + mLastFramerateTenths, + stream.minBitrate, + stream.targetBitrate, + stream.maxBitrate); + } + // webrtc.org expects simulcast streams to be ordered by increasing + // fidelity, our jsep code does the opposite. + cinst.simulcastStream[numberOfSimulcastEncodings-i-1] = stream; + } + + cinst.numberOfSimulcastStreams = numberOfSimulcastEncodings; +} + +/** + * Perform validation on the codecConfig to be applied + * Verifies if the codec is already applied. 
+ */ +MediaConduitErrorCode +WebrtcVideoConduit::ValidateCodecConfig(const VideoCodecConfig* codecInfo, + bool send) +{ + if(!codecInfo) + { + CSFLogError(logTag, "%s Null CodecConfig ", __FUNCTION__); + return kMediaConduitMalformedArgument; + } + + if((codecInfo->mName.empty()) || + (codecInfo->mName.length() >= CODEC_PLNAME_SIZE)) + { + CSFLogError(logTag, "%s Invalid Payload Name Length ", __FUNCTION__); + return kMediaConduitMalformedArgument; + } + + return kMediaConduitNoError; +} + +void +WebrtcVideoConduit::VideoLatencyUpdate(uint64_t newSample) +{ + mVideoLatencyAvg = (sRoundingPadding * newSample + sAlphaNum * mVideoLatencyAvg) / sAlphaDen; +} + +uint64_t +WebrtcVideoConduit::MozVideoLatencyAvg() +{ + return mVideoLatencyAvg / sRoundingPadding; +} + +uint64_t +WebrtcVideoConduit::CodecPluginID() +{ + if (mExternalSendCodecHandle) { + return mExternalSendCodecHandle->PluginID(); + } else if (mExternalRecvCodecHandle) { + return mExternalRecvCodecHandle->PluginID(); + } + return 0; +} + +bool +WebrtcVideoConduit::DetermineREDAndULPFECPayloadTypes(uint8_t &payload_type_red, uint8_t &payload_type_ulpfec) +{ + webrtc::VideoCodec video_codec; + payload_type_red = INVALID_RTP_PAYLOAD; + payload_type_ulpfec = INVALID_RTP_PAYLOAD; + + for(int idx=0; idx < mPtrViECodec->NumberOfCodecs(); idx++) + { + if(mPtrViECodec->GetCodec(idx, video_codec) == 0) + { + switch(video_codec.codecType) { + case webrtc::VideoCodecType::kVideoCodecRED: + payload_type_red = video_codec.plType; + break; + case webrtc::VideoCodecType::kVideoCodecULPFEC: + payload_type_ulpfec = video_codec.plType; + break; + default: + break; + } + } + } + + return payload_type_red != INVALID_RTP_PAYLOAD + && payload_type_ulpfec != INVALID_RTP_PAYLOAD; +} + +}// end namespace diff --git a/media/webrtc/signaling/src/media-conduit/VideoConduit.h b/media/webrtc/signaling/src/media-conduit/VideoConduit.h new file mode 100755 index 000000000..323a6a284 --- /dev/null +++ b/media/webrtc/signaling/src/media-conduit/VideoConduit.h @@ -0,0 +1,429 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef VIDEO_SESSION_H_ +#define VIDEO_SESSION_H_ + +#include "nsAutoPtr.h" +#include "mozilla/Attributes.h" +#include "mozilla/Atomics.h" + +#include "MediaConduitInterface.h" +#include "MediaEngineWrapper.h" +#include "CodecStatistics.h" +#include "LoadManagerFactory.h" +#include "LoadManager.h" +#include "runnable_utils.h" + +// conflicts with #include of scoped_ptr.h +#undef FF +// Video Engine Includes +#include "webrtc/common_types.h" +#ifdef FF +#undef FF // Avoid name collision between scoped_ptr.h and nsCRTGlue.h. +#endif +#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h" +#include "webrtc/video_engine/include/vie_base.h" +#include "webrtc/video_engine/include/vie_capture.h" +#include "webrtc/video_engine/include/vie_codec.h" +#include "webrtc/video_engine/include/vie_external_codec.h" +#include "webrtc/video_engine/include/vie_render.h" +#include "webrtc/video_engine/include/vie_network.h" +#include "webrtc/video_engine/include/vie_rtp_rtcp.h" + +/** This file hosts several structures identifying different aspects + * of a RTP Session. 
+ */
+
+ using webrtc::ViEBase;
+ using webrtc::ViENetwork;
+ using webrtc::ViECodec;
+ using webrtc::ViECapture;
+ using webrtc::ViERender;
+ using webrtc::ViEExternalCapture;
+ using webrtc::ViEExternalCodec;
+
+namespace mozilla {
+
+class WebrtcAudioConduit;
+class nsThread;
+
+// Interface of external video encoder for WebRTC.
+class WebrtcVideoEncoder:public VideoEncoder
+                        ,public webrtc::VideoEncoder
+{};
+
+// Interface of external video decoder for WebRTC.
+class WebrtcVideoDecoder:public VideoDecoder
+                        ,public webrtc::VideoDecoder
+{};
+
+/**
+ * Concrete class for Video session. Hooks up
+ *  - media-source and target to external transport
+ */
+class WebrtcVideoConduit : public VideoSessionConduit
+                         , public webrtc::Transport
+                         , public webrtc::ExternalRenderer
+{
+public:
+  //VideoEngine defined constant for Payload Name Size.
+  static const unsigned int CODEC_PLNAME_SIZE;
+
+  /**
+   * Set up A/V sync between this (incoming) VideoConduit and an audio conduit.
+   */
+  void SyncTo(WebrtcAudioConduit *aConduit);
+
+  /**
+   * Function to attach Renderer end-point for the Media-Video conduit.
+   * @param aRenderer : Reference to the concrete Video renderer implementation
+   * Note: Multiple invocations of this API shall remove an existing renderer
+   * and attach the new one to the Conduit.
+   */
+  virtual MediaConduitErrorCode AttachRenderer(RefPtr<VideoRenderer> aVideoRenderer) override;
+  virtual void DetachRenderer() override;
+
+  /**
+   * APIs used by the registered external transport to this Conduit to
+   * feed in received RTP Frames to the VideoEngine for decoding
+   */
+  virtual MediaConduitErrorCode ReceivedRTPPacket(const void *data, int len) override;
+
+  /**
+   * APIs used by the registered external transport to this Conduit to
+   * feed in received RTCP Frames to the VideoEngine
+   */
+  virtual MediaConduitErrorCode ReceivedRTCPPacket(const void *data, int len) override;
+
+  virtual MediaConduitErrorCode StopTransmitting() override;
+  virtual MediaConduitErrorCode StartTransmitting() override;
+  virtual MediaConduitErrorCode StopReceiving() override;
+  virtual MediaConduitErrorCode StartReceiving() override;
+
+  /**
+   * Function to configure sending codec mode for different content
+   */
+  virtual MediaConduitErrorCode ConfigureCodecMode(webrtc::VideoCodecMode) override;
+
+  /**
+   * Function to configure send codec for the video session
+   * @param sendSessionConfig: CodecConfiguration
+   * @result: On Success, the video engine is configured with passed in codec for send
+   *          On failure, video engine transmit functionality is disabled.
+   * NOTE: This API can be invoked multiple times. Invoking this API may involve restarting
+   *       transmission sub-system on the engine.
+   */
+  virtual MediaConduitErrorCode ConfigureSendMediaCodec(const VideoCodecConfig* codecInfo) override;
+
+  /**
+   * Function to configure list of receive codecs for the video session
+   * @param codecConfigList: list of receive codec configurations
+   * @result: On Success, the video engine is configured with the passed in codecs for receive,
+   *          and playout is enabled.
+   *          On failure, video engine receive functionality is disabled.
+   * NOTE: This API can be invoked multiple times. Invoking this API may involve restarting
+   *       reception sub-system on the engine.
+   */
+  virtual MediaConduitErrorCode ConfigureRecvMediaCodecs(
+    const std::vector<VideoCodecConfig* >& codecConfigList) override;
+
+  /**
+   * Register Transport for this Conduit.
+   * RTP and RTCP frames from the VideoEngine
+   * shall be passed to the registered transport for transporting externally.
+   */
+  virtual MediaConduitErrorCode SetTransmitterTransport(RefPtr<TransportInterface> aTransport) override;
+
+  virtual MediaConduitErrorCode SetReceiverTransport(RefPtr<TransportInterface> aTransport) override;
+
+  /**
+   * Function to set the encoding bitrate limits based on incoming frame size and rate
+   * @param width, height: dimensions of the frame
+   * @param cap: user-enforced max bitrate, or 0
+   * @param aLastFramerateTenths: holds the current input framerate
+   * @param out_min, out_start, out_max: bitrate results
+   */
+  void SelectBitrates(unsigned short width,
+                      unsigned short height,
+                      unsigned int cap,
+                      mozilla::Atomic<int32_t, mozilla::Relaxed>& aLastFramerateTenths,
+                      unsigned int& out_min,
+                      unsigned int& out_start,
+                      unsigned int& out_max);
+
+  /**
+   * Function to select and change the encoding resolution based on incoming frame size
+   * and current available bandwidth.
+   * @param width, height: dimensions of the frame
+   * @param frame: optional frame to submit for encoding after reconfig
+   */
+  bool SelectSendResolution(unsigned short width,
+                            unsigned short height,
+                            webrtc::I420VideoFrame *frame);
+
+  /**
+   * Function to reconfigure the current send codec for a different
+   * width/height/framerate/etc.
+   * @param width, height: dimensions of the frame
+   * @param frame: optional frame to submit for encoding after reconfig
+   */
+  nsresult ReconfigureSendCodec(unsigned short width,
+                                unsigned short height,
+                                webrtc::I420VideoFrame *frame);
+
+  /**
+   * Function to select and change the encoding frame rate based on incoming frame rate
+   * and max-mbps setting.
+   * @param current framerate
+   * @result new framerate
+   */
+  unsigned int SelectSendFrameRate(unsigned int framerate) const;
+
+  /**
+   * Function to deliver a capture video frame for encoding and transport
+   * @param video_frame: pointer to captured video-frame.
+   * @param video_frame_length: size of the frame
+   * @param width, height: dimensions of the frame
+   * @param video_type: Type of the video frame - I420, RAW
+   * @param capture_time: timestamp when the frame was captured.
+   *                      if 0, the timestamp is automatically generated by the engine.
+   * NOTE: ConfigureSendMediaCodec() SHOULD be called before this function can be invoked.
+   * This ensures the inserted video-frames can be transmitted by the conduit.
+   */
+  virtual MediaConduitErrorCode SendVideoFrame(unsigned char* video_frame,
+                                               unsigned int video_frame_length,
+                                               unsigned short width,
+                                               unsigned short height,
+                                               VideoType video_type,
+                                               uint64_t capture_time) override;
+  virtual MediaConduitErrorCode SendVideoFrame(webrtc::I420VideoFrame& frame) override;
+
+  /**
+   * Set an external encoder object |encoder| for the payload type in |config|
+   * as the sender-side codec.
+   */
+  virtual MediaConduitErrorCode SetExternalSendCodec(VideoCodecConfig* config,
+                                                     VideoEncoder* encoder) override;
+
+  /**
+   * Set an external decoder object |decoder| for the payload type in |config|
+   * as the receiver-side codec.
+   */
+  virtual MediaConduitErrorCode SetExternalRecvCodec(VideoCodecConfig* config,
+                                                     VideoDecoder* decoder) override;
+
+  /**
+   * Enables use of Rtp Stream Id, and sets the extension ID.
+   */
+  virtual MediaConduitErrorCode EnableRTPStreamIdExtension(bool enabled, uint8_t id) override;
+
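+  /**
+   * Typical send-side call order, sketched for orientation only (hypothetical
+   * caller-side names; aTransport and aConfig are placeholders):
+   *   conduit->SetTransmitterTransport(aTransport);
+   *   conduit->ConfigureSendMediaCodec(aConfig);
+   *   conduit->StartTransmitting();
+   *   conduit->SendVideoFrame(...);   // once per captured frame
+   */
+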
+  /**
+   * Webrtc transport implementation to send and receive RTP packet.
+   * VideoConduit registers itself as ExternalTransport to the VideoEngine
+   */
+  virtual int SendPacket(int channel, const void *data, size_t len) override;
+
+  /**
+   * Webrtc transport implementation to send and receive RTCP packet.
+   * VideoConduit registers itself as ExternalTransport to the VideoEngine
+   */
+  virtual int SendRTCPPacket(int channel, const void *data, size_t len) override;
+
+
+  /**
+   * Webrtc External Renderer Implementation APIs.
+   * Raw I420 Frames are delivered to the VideoConduit by the VideoEngine
+   */
+  virtual int FrameSizeChange(unsigned int, unsigned int, unsigned int) override;
+
+  virtual int DeliverFrame(unsigned char*, size_t, uint32_t, int64_t,
+                           int64_t, void *handle) override;
+
+  virtual int DeliverFrame(unsigned char*, size_t, uint32_t, uint32_t, uint32_t, int64_t,
+                           int64_t, void *handle);
+
+  virtual int DeliverI420Frame(const webrtc::I420VideoFrame& webrtc_frame) override;
+
+  /**
+   * Does DeliverFrame() support a null buffer and non-null handle
+   * (video texture)?
+   * B2G supports it (when using HW video decoder with graphic buffer output).
+   * XXX Investigate!  Especially for Android
+   */
+  virtual bool IsTextureSupported() override {
+#ifdef WEBRTC_GONK
+    return true;
+#else
+    return false;
+#endif
+  }
+
+  virtual uint64_t CodecPluginID() override;
+
+  unsigned short SendingWidth() override {
+    return mSendingWidth;
+  }
+
+  unsigned short SendingHeight() override {
+    return mSendingHeight;
+  }
+
+  unsigned int SendingMaxFs() override {
+    if(mCurSendCodecConfig) {
+      return mCurSendCodecConfig->mEncodingConstraints.maxFs;
+    }
+    return 0;
+  }
+
+  unsigned int SendingMaxFr() override {
+    if(mCurSendCodecConfig) {
+      return mCurSendCodecConfig->mEncodingConstraints.maxFps;
+    }
+    return 0;
+  }
+
+  WebrtcVideoConduit();
+  virtual ~WebrtcVideoConduit();
+
+  MediaConduitErrorCode InitMain();
+  virtual MediaConduitErrorCode Init();
+  virtual void Destroy();
+
+  int GetChannel() { return mChannel; }
+  webrtc::VideoEngine* GetVideoEngine() { return mVideoEngine; }
+  bool GetLocalSSRC(unsigned int* ssrc) override;
+  bool SetLocalSSRC(unsigned int ssrc) override;
+  bool GetRemoteSSRC(unsigned int* ssrc) override;
+  bool SetLocalCNAME(const char* cname) override;
+  bool GetVideoEncoderStats(double* framerateMean,
+                            double* framerateStdDev,
+                            double* bitrateMean,
+                            double* bitrateStdDev,
+                            uint32_t* droppedFrames) override;
+  bool GetVideoDecoderStats(double* framerateMean,
+                            double* framerateStdDev,
+                            double* bitrateMean,
+                            double* bitrateStdDev,
+                            uint32_t* discardedPackets) override;
+  bool GetAVStats(int32_t* jitterBufferDelayMs,
+                  int32_t* playoutBufferDelayMs,
+                  int32_t* avSyncOffsetMs) override;
+  bool GetRTPStats(unsigned int* jitterMs, unsigned int* cumulativeLost) override;
+  bool GetRTCPReceiverReport(DOMHighResTimeStamp* timestamp,
+                             uint32_t* jitterMs,
+                             uint32_t* packetsReceived,
+                             uint64_t* bytesReceived,
+                             uint32_t* cumulativeLost,
+                             int32_t* rttMs) override;
+  bool GetRTCPSenderReport(DOMHighResTimeStamp* timestamp,
+                           unsigned int* packetsSent,
+                           uint64_t* bytesSent) override;
+  uint64_t MozVideoLatencyAvg();
+
+private:
+  DISALLOW_COPY_AND_ASSIGN(WebrtcVideoConduit);
+
+  static inline bool OnThread(nsIEventTarget *thread)
+  {
+    bool on;
+    nsresult rv;
+    rv = thread->IsOnCurrentThread(&on);
+
+    // If the target thread has already shut down, we don't want to assert.
+    if (rv != NS_ERROR_NOT_INITIALIZED) {
+      MOZ_ASSERT(NS_SUCCEEDED(rv));
+    }
+
+    if (NS_WARN_IF(NS_FAILED(rv))) {
+      return false;
+    }
+    return on;
+  }
+
+  //Local database of currently applied receive codecs
+  typedef std::vector<VideoCodecConfig* > RecvCodecList;
+
+  //Function to convert between WebRTC and Conduit codec structures
+  void CodecConfigToWebRTCCodec(const VideoCodecConfig* codecInfo,
+                                webrtc::VideoCodec& cinst);
+
+  //Checks the codec to be applied
+  MediaConduitErrorCode ValidateCodecConfig(const VideoCodecConfig* codecInfo, bool send);
+
+  //Utility function to dump recv codec database
+  void DumpCodecDB() const;
+
+  // Video Latency Test averaging filter
+  void VideoLatencyUpdate(uint64_t new_sample);
+
+  // Utility function to determine RED and ULPFEC payload types
+  bool DetermineREDAndULPFECPayloadTypes(uint8_t &payload_type_red, uint8_t &payload_type_ulpfec);
+
+  webrtc::VideoEngine* mVideoEngine;
+  mozilla::ReentrantMonitor mTransportMonitor;
+  RefPtr<TransportInterface> mTransmitterTransport;
+  RefPtr<TransportInterface> mReceiverTransport;
+  RefPtr<VideoRenderer> mRenderer;
+
+  ScopedCustomReleasePtr<webrtc::ViEBase> mPtrViEBase;
+  ScopedCustomReleasePtr<webrtc::ViECapture> mPtrViECapture;
+  ScopedCustomReleasePtr<webrtc::ViECodec> mPtrViECodec;
+  ScopedCustomReleasePtr<webrtc::ViENetwork> mPtrViENetwork;
+  ScopedCustomReleasePtr<webrtc::ViERender> mPtrViERender;
+  ScopedCustomReleasePtr<webrtc::ViERTP_RTCP> mPtrRTP;
+  ScopedCustomReleasePtr<webrtc::ViEExternalCodec> mPtrExtCodec;
+
+  webrtc::ViEExternalCapture* mPtrExtCapture;
+
+  // Engine state we are concerned with.
+  mozilla::Atomic<bool> mEngineTransmitting; // if true ==> Transmit Sub-system is up and running
+  mozilla::Atomic<bool> mEngineReceiving;    // if true ==> Receive Sub-system is up and running
+
+  int mChannel; // Video Channel for this conduit
+  int mCapId;   // Capturer for this conduit
+
+  Mutex mCodecMutex; // protects mCurSendCodecConfig
+  nsAutoPtr<VideoCodecConfig> mCurSendCodecConfig;
+  bool mInReconfig;
+
+  unsigned short mLastWidth;
+  unsigned short mLastHeight;
+  unsigned short mSendingWidth;
+  unsigned short mSendingHeight;
+  unsigned short mReceivingWidth;
+  unsigned short mReceivingHeight;
+  unsigned int   mSendingFramerate;
+  // scaled by *10 because Atomic<double/float> isn't supported
+  mozilla::Atomic<int32_t, mozilla::Relaxed> mLastFramerateTenths;
+  unsigned short mNumReceivingStreams;
+  bool mVideoLatencyTestEnable;
+  uint64_t mVideoLatencyAvg;
+  uint32_t mMinBitrate;
+  uint32_t mStartBitrate;
+  uint32_t mMaxBitrate;
+  uint32_t mMinBitrateEstimate;
+
+  bool mRtpStreamIdEnabled;
+  uint8_t mRtpStreamIdExtId;
+
+  static const unsigned int sAlphaNum = 7;
+  static const unsigned int sAlphaDen = 8;
+  static const unsigned int sRoundingPadding = 1024;
+
+  RefPtr<WebrtcAudioConduit> mSyncedTo;
+
+  nsAutoPtr<VideoCodecConfig> mExternalSendCodec;
+  nsAutoPtr<VideoCodecConfig> mExternalRecvCodec;
+  nsAutoPtr<VideoEncoder> mExternalSendCodecHandle;
+  nsAutoPtr<VideoDecoder> mExternalRecvCodecHandle;
+
+  // statistics object for video codec
+  nsAutoPtr<VideoCodecStatistics> mVideoCodecStat;
+
+  nsAutoPtr<LoadManager> mLoadManager;
+  webrtc::VideoCodecMode mCodecMode;
+};
+} // end namespace
+
+#endif
diff --git a/media/webrtc/signaling/src/media-conduit/VideoTypes.h b/media/webrtc/signaling/src/media-conduit/VideoTypes.h
new file mode 100755
index 000000000..e4cff3962
--- /dev/null
+++ b/media/webrtc/signaling/src/media-conduit/VideoTypes.h
@@ -0,0 +1,62 @@
+/*
+ *  Copyright (c) 2012, The WebRTC
project authors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * * Neither the name of Google nor the names of its contributors may + * be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef VIDEO_TYPE_ +#define VIDEO_TYPE_ + +namespace mozilla +{ +/* + * Enumeration for different video types supported by the + * video-engine. If more types will be supported in the future + * newer one shall be appended to the bottom of the list + */ +enum VideoType +{ + kVideoI420 = 0, + kVideoYV12 = 1, + kVideoYUY2 = 2, + kVideoUYVY = 3, + kVideoIYUV = 4, + kVideoARGB = 5, + kVideoRGB24 = 6, + kVideoRGB565 = 7, + kVideoARGB4444 = 8, + kVideoARGB1555 = 9, + kVideoMJPEG = 10, + kVideoNV12 = 11, + kVideoNV21 = 12, + kVideoBGRA = 13, + kVideoUnknown = 99 +}; +} +#endif diff --git a/media/webrtc/signaling/src/media-conduit/WebrtcGmpVideoCodec.cpp b/media/webrtc/signaling/src/media-conduit/WebrtcGmpVideoCodec.cpp new file mode 100644 index 000000000..ad47e5316 --- /dev/null +++ b/media/webrtc/signaling/src/media-conduit/WebrtcGmpVideoCodec.cpp @@ -0,0 +1,965 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "WebrtcGmpVideoCodec.h" + +#include <iostream> +#include <vector> + +#include "mozilla/Move.h" +#include "mozilla/SyncRunnable.h" +#include "VideoConduit.h" +#include "AudioConduit.h" +#include "runnable_utils.h" + +#include "mozIGeckoMediaPluginService.h" +#include "nsServiceManagerUtils.h" +#include "GMPVideoDecoderProxy.h" +#include "GMPVideoEncoderProxy.h" +#include "MainThreadUtils.h" + +#include "gmp-video-host.h" +#include "gmp-video-frame-i420.h" +#include "gmp-video-frame-encoded.h" + +#include "webrtc/video_engine/include/vie_external_codec.h" + +namespace mozilla { + +#ifdef LOG +#undef LOG +#endif + +#ifdef MOZILLA_INTERNAL_API +extern mozilla::LogModule* GetGMPLog(); +#else +// For CPP unit tests +PRLogModuleInfo* +GetGMPLog() +{ + static PRLogModuleInfo *sLog; + if (!sLog) + sLog = PR_NewLogModule("GMP"); + return sLog; +} +#endif +#define LOGD(msg) MOZ_LOG(GetGMPLog(), mozilla::LogLevel::Debug, msg) +#define LOG(level, msg) MOZ_LOG(GetGMPLog(), (level), msg) + +WebrtcGmpPCHandleSetter::WebrtcGmpPCHandleSetter(const std::string& aPCHandle) +{ + if (!NS_IsMainThread()) { + MOZ_ASSERT(false, "WebrtcGmpPCHandleSetter can only be used on main"); + return; + } + MOZ_ASSERT(sCurrentHandle.empty()); + sCurrentHandle = aPCHandle; +} + +WebrtcGmpPCHandleSetter::~WebrtcGmpPCHandleSetter() +{ + if (!NS_IsMainThread()) { + MOZ_ASSERT(false, "WebrtcGmpPCHandleSetter can only be used on main"); + return; + } + + sCurrentHandle.clear(); +} + +/* static */ std::string +WebrtcGmpPCHandleSetter::GetCurrentHandle() +{ + if (!NS_IsMainThread()) { + MOZ_ASSERT(false, "WebrtcGmpPCHandleSetter can only be used on main"); + return ""; + } + + return sCurrentHandle; +} + +std::string WebrtcGmpPCHandleSetter::sCurrentHandle = ""; + +// Encoder. +WebrtcGmpVideoEncoder::WebrtcGmpVideoEncoder() + : mGMP(nullptr) + , mInitting(false) + , mHost(nullptr) + , mMaxPayloadSize(0) + , mCallbackMutex("WebrtcGmpVideoEncoder encoded callback mutex") + , mCallback(nullptr) + , mCachedPluginId(0) +{ +#ifdef MOZILLA_INTERNAL_API + if (mPCHandle.empty()) { + mPCHandle = WebrtcGmpPCHandleSetter::GetCurrentHandle(); + } + MOZ_ASSERT(!mPCHandle.empty()); +#endif +} + +WebrtcGmpVideoEncoder::~WebrtcGmpVideoEncoder() +{ + // We should not have been destroyed if we never closed our GMP + MOZ_ASSERT(!mGMP); +} + +static int +WebrtcFrameTypeToGmpFrameType(webrtc::VideoFrameType aIn, + GMPVideoFrameType *aOut) +{ + MOZ_ASSERT(aOut); + switch(aIn) { + case webrtc::kKeyFrame: + *aOut = kGMPKeyFrame; + break; + case webrtc::kDeltaFrame: + *aOut = kGMPDeltaFrame; + break; + case webrtc::kGoldenFrame: + *aOut = kGMPGoldenFrame; + break; + case webrtc::kAltRefFrame: + *aOut = kGMPAltRefFrame; + break; + case webrtc::kSkipFrame: + *aOut = kGMPSkipFrame; + break; + default: + MOZ_CRASH("Unexpected VideoFrameType"); + } + + return WEBRTC_VIDEO_CODEC_OK; +} + +static int +GmpFrameTypeToWebrtcFrameType(GMPVideoFrameType aIn, + webrtc::VideoFrameType *aOut) +{ + MOZ_ASSERT(aOut); + switch(aIn) { + case kGMPKeyFrame: + *aOut = webrtc::kKeyFrame; + break; + case kGMPDeltaFrame: + *aOut = webrtc::kDeltaFrame; + break; + case kGMPGoldenFrame: + *aOut = webrtc::kGoldenFrame; + break; + case kGMPAltRefFrame: + *aOut = webrtc::kAltRefFrame; + break; + case kGMPSkipFrame: + *aOut = webrtc::kSkipFrame; + break; + default: + MOZ_CRASH("Unexpected GMPVideoFrameType"); + } + + return WEBRTC_VIDEO_CODEC_OK; +} + +int32_t +WebrtcGmpVideoEncoder::InitEncode(const webrtc::VideoCodec* aCodecSettings, + int32_t aNumberOfCores, + uint32_t 
aMaxPayloadSize) +{ + if (!mMPS) { + mMPS = do_GetService("@mozilla.org/gecko-media-plugin-service;1"); + } + MOZ_ASSERT(mMPS); + + if (!mGMPThread) { + if (NS_WARN_IF(NS_FAILED(mMPS->GetThread(getter_AddRefs(mGMPThread))))) { + return WEBRTC_VIDEO_CODEC_ERROR; + } + } + + // Bug XXXXXX: transfer settings from codecSettings to codec. + GMPVideoCodec codecParams; + memset(&codecParams, 0, sizeof(codecParams)); + + codecParams.mGMPApiVersion = 33; + codecParams.mStartBitrate = aCodecSettings->startBitrate; + codecParams.mMinBitrate = aCodecSettings->minBitrate; + codecParams.mMaxBitrate = aCodecSettings->maxBitrate; + codecParams.mMaxFramerate = aCodecSettings->maxFramerate; + mMaxPayloadSize = aMaxPayloadSize; + + memset(&mCodecSpecificInfo, 0, sizeof(webrtc::CodecSpecificInfo)); + mCodecSpecificInfo.codecType = webrtc::kVideoCodecH264; + mCodecSpecificInfo.codecSpecific.H264.packetizationMode = aCodecSettings->codecSpecific.H264.packetizationMode; + if (mCodecSpecificInfo.codecSpecific.H264.packetizationMode == 1) { + mMaxPayloadSize = 0; // No limit. + } + + if (aCodecSettings->mode == webrtc::kScreensharing) { + codecParams.mMode = kGMPScreensharing; + } else { + codecParams.mMode = kGMPRealtimeVideo; + } + + codecParams.mWidth = aCodecSettings->width; + codecParams.mHeight = aCodecSettings->height; + + RefPtr<GmpInitDoneRunnable> initDone(new GmpInitDoneRunnable(mPCHandle)); + mGMPThread->Dispatch(WrapRunnableNM(WebrtcGmpVideoEncoder::InitEncode_g, + RefPtr<WebrtcGmpVideoEncoder>(this), + codecParams, + aNumberOfCores, + aMaxPayloadSize, + initDone), + NS_DISPATCH_NORMAL); + + // Since init of the GMP encoder is a multi-step async dispatch (including + // dispatches to main), and since this function is invoked on main, there's + // no safe way to block until this init is done. If an error occurs, we'll + // handle it later. + return WEBRTC_VIDEO_CODEC_OK; +} + +/* static */ +void +WebrtcGmpVideoEncoder::InitEncode_g( + const RefPtr<WebrtcGmpVideoEncoder>& aThis, + const GMPVideoCodec& aCodecParams, + int32_t aNumberOfCores, + uint32_t aMaxPayloadSize, + const RefPtr<GmpInitDoneRunnable>& aInitDone) +{ + nsTArray<nsCString> tags; + tags.AppendElement(NS_LITERAL_CSTRING("h264")); + UniquePtr<GetGMPVideoEncoderCallback> callback( + new InitDoneCallback(aThis, aInitDone, aCodecParams, aMaxPayloadSize)); + aThis->mInitting = true; + nsresult rv = aThis->mMPS->GetGMPVideoEncoder(nullptr, + &tags, + NS_LITERAL_CSTRING(""), + Move(callback)); + if (NS_WARN_IF(NS_FAILED(rv))) { + LOGD(("GMP Encode: GetGMPVideoEncoder failed")); + aThis->Close_g(); + aInitDone->Dispatch(WEBRTC_VIDEO_CODEC_ERROR, + "GMP Encode: GetGMPVideoEncoder failed"); + } +} + +int32_t +WebrtcGmpVideoEncoder::GmpInitDone(GMPVideoEncoderProxy* aGMP, + GMPVideoHost* aHost, + std::string* aErrorOut) +{ + if (!mInitting || !aGMP || !aHost) { + *aErrorOut = "GMP Encode: Either init was aborted, " + "or init failed to supply either a GMP Encoder or GMP host."; + if (aGMP) { + // This could destroy us, since aGMP may be the last thing holding a ref + // Return immediately. 
+ aGMP->Close(); + } + return WEBRTC_VIDEO_CODEC_ERROR; + } + + mInitting = false; + + if (mGMP && mGMP != aGMP) { + Close_g(); + } + + mGMP = aGMP; + mHost = aHost; + mCachedPluginId = mGMP->GetPluginId(); + return WEBRTC_VIDEO_CODEC_OK; +} + +int32_t +WebrtcGmpVideoEncoder::GmpInitDone(GMPVideoEncoderProxy* aGMP, + GMPVideoHost* aHost, + const GMPVideoCodec& aCodecParams, + uint32_t aMaxPayloadSize, + std::string* aErrorOut) +{ + int32_t r = GmpInitDone(aGMP, aHost, aErrorOut); + if (r != WEBRTC_VIDEO_CODEC_OK) { + // We might have been destroyed if GmpInitDone failed. + // Return immediately. + return r; + } + mCodecParams = aCodecParams; + return InitEncoderForSize(aCodecParams.mWidth, + aCodecParams.mHeight, + aErrorOut); +} + +void +WebrtcGmpVideoEncoder::Close_g() +{ + GMPVideoEncoderProxy* gmp(mGMP); + mGMP = nullptr; + mHost = nullptr; + mInitting = false; + + if (gmp) { + // Do this last, since this could cause us to be destroyed + gmp->Close(); + } +} + +int32_t +WebrtcGmpVideoEncoder::InitEncoderForSize(unsigned short aWidth, + unsigned short aHeight, + std::string* aErrorOut) +{ + mCodecParams.mWidth = aWidth; + mCodecParams.mHeight = aHeight; + // Pass dummy codecSpecific data for now... + nsTArray<uint8_t> codecSpecific; + + GMPErr err = mGMP->InitEncode(mCodecParams, codecSpecific, this, 1, mMaxPayloadSize); + if (err != GMPNoErr) { + *aErrorOut = "GMP Encode: InitEncode failed"; + return WEBRTC_VIDEO_CODEC_ERROR; + } + + return WEBRTC_VIDEO_CODEC_OK; +} + + +int32_t +WebrtcGmpVideoEncoder::Encode(const webrtc::I420VideoFrame& aInputImage, + const webrtc::CodecSpecificInfo* aCodecSpecificInfo, + const std::vector<webrtc::VideoFrameType>* aFrameTypes) +{ + MOZ_ASSERT(aInputImage.width() >= 0 && aInputImage.height() >= 0); + // Would be really nice to avoid this sync dispatch, but it would require a + // copy of the frame, since it doesn't appear to actually have a refcount. + mGMPThread->Dispatch( + WrapRunnable(this, + &WebrtcGmpVideoEncoder::Encode_g, + &aInputImage, + aCodecSpecificInfo, + aFrameTypes), + NS_DISPATCH_SYNC); + + return WEBRTC_VIDEO_CODEC_OK; +} + +void +WebrtcGmpVideoEncoder::RegetEncoderForResolutionChange( + uint32_t aWidth, + uint32_t aHeight, + const RefPtr<GmpInitDoneRunnable>& aInitDone) +{ + Close_g(); + + UniquePtr<GetGMPVideoEncoderCallback> callback( + new InitDoneForResolutionChangeCallback(this, + aInitDone, + aWidth, + aHeight)); + + // OpenH264 codec (at least) can't handle dynamic input resolution changes + // re-init the plugin when the resolution changes + // XXX allow codec to indicate it doesn't need re-init! 
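+  // Flow recap (a restatement of this function, for orientation): Close_g()
+  // above tore down the old encoder, and the GetGMPVideoEncoder call below
+  // requests a fresh one; InitDoneForResolutionChangeCallback then re-runs
+  // InitEncoderForSize() with the new dimensions.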
+ nsTArray<nsCString> tags; + tags.AppendElement(NS_LITERAL_CSTRING("h264")); + mInitting = true; + if (NS_WARN_IF(NS_FAILED(mMPS->GetGMPVideoEncoder(nullptr, + &tags, + NS_LITERAL_CSTRING(""), + Move(callback))))) { + aInitDone->Dispatch(WEBRTC_VIDEO_CODEC_ERROR, + "GMP Encode: GetGMPVideoEncoder failed"); + } +} + +int32_t +WebrtcGmpVideoEncoder::Encode_g(const webrtc::I420VideoFrame* aInputImage, + const webrtc::CodecSpecificInfo* aCodecSpecificInfo, + const std::vector<webrtc::VideoFrameType>* aFrameTypes) +{ + if (!mGMP) { + // destroyed via Terminate(), failed to init, or just not initted yet + LOGD(("GMP Encode: not initted yet")); + return WEBRTC_VIDEO_CODEC_ERROR; + } + MOZ_ASSERT(mHost); + + if (static_cast<uint32_t>(aInputImage->width()) != mCodecParams.mWidth || + static_cast<uint32_t>(aInputImage->height()) != mCodecParams.mHeight) { + LOGD(("GMP Encode: resolution change from %ux%u to %dx%d", + mCodecParams.mWidth, mCodecParams.mHeight, aInputImage->width(), aInputImage->height())); + + RefPtr<GmpInitDoneRunnable> initDone(new GmpInitDoneRunnable(mPCHandle)); + RegetEncoderForResolutionChange(aInputImage->width(), + aInputImage->height(), + initDone); + if (!mGMP) { + // We needed to go async to re-get the encoder. Bail. + return WEBRTC_VIDEO_CODEC_ERROR; + } + } + + GMPVideoFrame* ftmp = nullptr; + GMPErr err = mHost->CreateFrame(kGMPI420VideoFrame, &ftmp); + if (err != GMPNoErr) { + return WEBRTC_VIDEO_CODEC_ERROR; + } + GMPUniquePtr<GMPVideoi420Frame> frame(static_cast<GMPVideoi420Frame*>(ftmp)); + + err = frame->CreateFrame(aInputImage->allocated_size(webrtc::kYPlane), + aInputImage->buffer(webrtc::kYPlane), + aInputImage->allocated_size(webrtc::kUPlane), + aInputImage->buffer(webrtc::kUPlane), + aInputImage->allocated_size(webrtc::kVPlane), + aInputImage->buffer(webrtc::kVPlane), + aInputImage->width(), + aInputImage->height(), + aInputImage->stride(webrtc::kYPlane), + aInputImage->stride(webrtc::kUPlane), + aInputImage->stride(webrtc::kVPlane)); + if (err != GMPNoErr) { + return err; + } + frame->SetTimestamp((aInputImage->timestamp() * 1000ll)/90); // note: rounds down! 
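+  // Unit-conversion note (illustrative): RTP video timestamps tick at the
+  // standard 90 kHz clock, while GMP expects microseconds, so
+  // ticks * 1000 / 90 converts units. A quick sanity check:
+  //   90000 ticks (one second) * 1000 / 90 = 1,000,000 us
+  // Integer division truncates, hence the "rounds down" note above.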
+ //frame->SetDuration(1000000ll/30); // XXX base duration on measured current FPS - or don't bother + + // Bug XXXXXX: Set codecSpecific info + GMPCodecSpecificInfo info; + memset(&info, 0, sizeof(info)); + info.mCodecType = kGMPVideoCodecH264; + nsTArray<uint8_t> codecSpecificInfo; + codecSpecificInfo.AppendElements((uint8_t*)&info, sizeof(GMPCodecSpecificInfo)); + + nsTArray<GMPVideoFrameType> gmp_frame_types; + for (auto it = aFrameTypes->begin(); it != aFrameTypes->end(); ++it) { + GMPVideoFrameType ft; + + int32_t ret = WebrtcFrameTypeToGmpFrameType(*it, &ft); + if (ret != WEBRTC_VIDEO_CODEC_OK) { + return ret; + } + + gmp_frame_types.AppendElement(ft); + } + + LOGD(("GMP Encode: %llu", (aInputImage->timestamp() * 1000ll)/90)); + err = mGMP->Encode(Move(frame), codecSpecificInfo, gmp_frame_types); + if (err != GMPNoErr) { + return err; + } + + return WEBRTC_VIDEO_CODEC_OK; +} + +int32_t +WebrtcGmpVideoEncoder::RegisterEncodeCompleteCallback(webrtc::EncodedImageCallback* aCallback) +{ + MutexAutoLock lock(mCallbackMutex); + mCallback = aCallback; + + return WEBRTC_VIDEO_CODEC_OK; +} + +/* static */ void +WebrtcGmpVideoEncoder::ReleaseGmp_g(RefPtr<WebrtcGmpVideoEncoder>& aEncoder) +{ + aEncoder->Close_g(); +} + +int32_t +WebrtcGmpVideoEncoder::ReleaseGmp() +{ + LOGD(("GMP Released:")); + if (mGMPThread) { + mGMPThread->Dispatch( + WrapRunnableNM(&WebrtcGmpVideoEncoder::ReleaseGmp_g, + RefPtr<WebrtcGmpVideoEncoder>(this)), + NS_DISPATCH_NORMAL); + } + return WEBRTC_VIDEO_CODEC_OK; +} + +int32_t +WebrtcGmpVideoEncoder::SetChannelParameters(uint32_t aPacketLoss, int aRTT) +{ + return WEBRTC_VIDEO_CODEC_OK; +} + +int32_t +WebrtcGmpVideoEncoder::SetRates(uint32_t aNewBitRate, uint32_t aFrameRate) +{ + MOZ_ASSERT(mGMPThread); + if (aFrameRate == 0) { + aFrameRate = 30; // Assume 30fps if we don't know the rate + } + mGMPThread->Dispatch(WrapRunnableNM(&WebrtcGmpVideoEncoder::SetRates_g, + RefPtr<WebrtcGmpVideoEncoder>(this), + aNewBitRate, + aFrameRate), + NS_DISPATCH_NORMAL); + + return WEBRTC_VIDEO_CODEC_OK; +} + +/* static */ int32_t +WebrtcGmpVideoEncoder::SetRates_g(RefPtr<WebrtcGmpVideoEncoder> aThis, + uint32_t aNewBitRate, + uint32_t aFrameRate) +{ + if (!aThis->mGMP) { + // destroyed via Terminate() + return WEBRTC_VIDEO_CODEC_ERROR; + } + + GMPErr err = aThis->mGMP->SetRates(aNewBitRate, aFrameRate); + if (err != GMPNoErr) { + return WEBRTC_VIDEO_CODEC_ERROR; + } + + return WEBRTC_VIDEO_CODEC_OK; +} + +// GMPVideoEncoderCallback virtual functions. 
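+// Note on the encoded-buffer layout parsed by Encoded() below (a sketch
+// inferred from the GMP_Buffer* handling in this file, not a normative
+// spec): the plugin hands back one buffer holding a sequence of NAL units,
+// each preceded by a little-endian length field whose width is selected by
+// GMPVideoEncodedFrame::BufferType(). For GMP_BufferLength32, two NALs are
+// laid out as:
+//
+//   [len0: 4 bytes][NAL 0 payload][len1: 4 bytes][NAL 1 payload]
+//
+// GMP_BufferSingle instead means one unprefixed NAL spanning the buffer.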
+void +WebrtcGmpVideoEncoder::Terminated() +{ + LOGD(("GMP Encoder Terminated: %p", (void *)this)); + + mGMP->Close(); + mGMP = nullptr; + mHost = nullptr; + mInitting = false; + // Could now notify that it's dead +} + +void +WebrtcGmpVideoEncoder::Encoded(GMPVideoEncodedFrame* aEncodedFrame, + const nsTArray<uint8_t>& aCodecSpecificInfo) +{ + MutexAutoLock lock(mCallbackMutex); + if (mCallback) { + webrtc::VideoFrameType ft; + GmpFrameTypeToWebrtcFrameType(aEncodedFrame->FrameType(), &ft); + uint32_t timestamp = (aEncodedFrame->TimeStamp() * 90ll + 999)/1000; + + LOGD(("GMP Encoded: %llu, type %d, len %d", + aEncodedFrame->TimeStamp(), + aEncodedFrame->BufferType(), + aEncodedFrame->Size())); + + // Right now makes one Encoded() callback per unit + // XXX convert to FragmentationHeader format (array of offsets and sizes plus a buffer) in + // combination with H264 packetization changes in webrtc/trunk code + uint8_t *buffer = aEncodedFrame->Buffer(); + uint8_t *end = aEncodedFrame->Buffer() + aEncodedFrame->Size(); + size_t size_bytes; + switch (aEncodedFrame->BufferType()) { + case GMP_BufferSingle: + size_bytes = 0; + break; + case GMP_BufferLength8: + size_bytes = 1; + break; + case GMP_BufferLength16: + size_bytes = 2; + break; + case GMP_BufferLength24: + size_bytes = 3; + break; + case GMP_BufferLength32: + size_bytes = 4; + break; + default: + // Really that it's not in the enum + LOG(LogLevel::Error, + ("GMP plugin returned incorrect type (%d)", aEncodedFrame->BufferType())); + // XXX Bug 1041232 - need a better API for interfacing to the + // plugin so we can kill it here + return; + } + + struct nal_entry { + uint32_t offset; + uint32_t size; + }; + AutoTArray<nal_entry, 1> nals; + uint32_t size; + // make sure we don't read past the end of the buffer getting the size + while (buffer+size_bytes < end) { + switch (aEncodedFrame->BufferType()) { + case GMP_BufferSingle: + size = aEncodedFrame->Size(); + break; + case GMP_BufferLength8: + size = *buffer++; + break; + case GMP_BufferLength16: + // presumes we can do unaligned loads + size = *(reinterpret_cast<uint16_t*>(buffer)); + buffer += 2; + break; + case GMP_BufferLength24: + // 24-bits is a pain, since byte-order issues make things painful + // I'm going to define 24-bit as little-endian always; big-endian must convert + size = ((uint32_t) *buffer) | + (((uint32_t) *(buffer+1)) << 8) | + (((uint32_t) *(buffer+2)) << 16); + buffer += 3; + break; + case GMP_BufferLength32: + // presumes we can do unaligned loads + size = *(reinterpret_cast<uint32_t*>(buffer)); + buffer += 4; + break; + default: + MOZ_CRASH("GMP_BufferType already handled in switch above"); + } + if (buffer+size > end) { + // XXX see above - should we kill the plugin for returning extra bytes? 
Probably + LOG(LogLevel::Error, + ("GMP plugin returned badly formatted encoded data: end is %td bytes past buffer end", + buffer+size - end)); + return; + } + // XXX optimize by making buffer an offset + nal_entry nal = {((uint32_t) (buffer-aEncodedFrame->Buffer())), (uint32_t) size}; + nals.AppendElement(nal); + buffer += size; + // on last one, buffer == end normally + } + if (buffer != end) { + // At most 3 bytes can be left over, depending on buffertype + LOGD(("GMP plugin returned %td extra bytes", end - buffer)); + } + + size_t num_nals = nals.Length(); + if (num_nals > 0) { + webrtc::RTPFragmentationHeader fragmentation; + fragmentation.VerifyAndAllocateFragmentationHeader(num_nals); + for (size_t i = 0; i < num_nals; i++) { + fragmentation.fragmentationOffset[i] = nals[i].offset; + fragmentation.fragmentationLength[i] = nals[i].size; + } + + webrtc::EncodedImage unit(aEncodedFrame->Buffer(), size, size); + unit._frameType = ft; + unit._timeStamp = timestamp; + // Ensure we ignore this when calculating RTCP timestamps + unit.capture_time_ms_ = -1; + unit._completeFrame = true; + + // TODO: Currently the OpenH264 codec does not preserve any codec + // specific info passed into it and just returns default values. + // If this changes in the future, it would be nice to get rid of + // mCodecSpecificInfo. + mCallback->Encoded(unit, &mCodecSpecificInfo, &fragmentation); + } + } +} + +// Decoder. +WebrtcGmpVideoDecoder::WebrtcGmpVideoDecoder() : + mGMP(nullptr), + mInitting(false), + mHost(nullptr), + mCallbackMutex("WebrtcGmpVideoDecoder decoded callback mutex"), + mCallback(nullptr), + mCachedPluginId(0), + mDecoderStatus(GMPNoErr) +{ +#ifdef MOZILLA_INTERNAL_API + if (mPCHandle.empty()) { + mPCHandle = WebrtcGmpPCHandleSetter::GetCurrentHandle(); + } + MOZ_ASSERT(!mPCHandle.empty()); +#endif +} + +WebrtcGmpVideoDecoder::~WebrtcGmpVideoDecoder() +{ + // We should not have been destroyed if we never closed our GMP + MOZ_ASSERT(!mGMP); +} + +int32_t +WebrtcGmpVideoDecoder::InitDecode(const webrtc::VideoCodec* aCodecSettings, + int32_t aNumberOfCores) +{ + if (!mMPS) { + mMPS = do_GetService("@mozilla.org/gecko-media-plugin-service;1"); + } + MOZ_ASSERT(mMPS); + + if (!mGMPThread) { + if (NS_WARN_IF(NS_FAILED(mMPS->GetThread(getter_AddRefs(mGMPThread))))) { + return WEBRTC_VIDEO_CODEC_ERROR; + } + } + + RefPtr<GmpInitDoneRunnable> initDone(new GmpInitDoneRunnable(mPCHandle)); + mGMPThread->Dispatch(WrapRunnableNM(&WebrtcGmpVideoDecoder::InitDecode_g, + RefPtr<WebrtcGmpVideoDecoder>(this), + aCodecSettings, + aNumberOfCores, + initDone), + NS_DISPATCH_NORMAL); + + return WEBRTC_VIDEO_CODEC_OK; +} + +/* static */ void +WebrtcGmpVideoDecoder::InitDecode_g( + const RefPtr<WebrtcGmpVideoDecoder>& aThis, + const webrtc::VideoCodec* aCodecSettings, + int32_t aNumberOfCores, + const RefPtr<GmpInitDoneRunnable>& aInitDone) +{ + nsTArray<nsCString> tags; + tags.AppendElement(NS_LITERAL_CSTRING("h264")); + UniquePtr<GetGMPVideoDecoderCallback> callback( + new InitDoneCallback(aThis, aInitDone)); + aThis->mInitting = true; + nsresult rv = aThis->mMPS->GetGMPVideoDecoder(nullptr, + &tags, + NS_LITERAL_CSTRING(""), + Move(callback)); + if (NS_WARN_IF(NS_FAILED(rv))) { + LOGD(("GMP Decode: GetGMPVideoDecoder failed")); + aThis->Close_g(); + aInitDone->Dispatch(WEBRTC_VIDEO_CODEC_ERROR, + "GMP Decode: GetGMPVideoDecoder failed."); + } +} + +int32_t +WebrtcGmpVideoDecoder::GmpInitDone(GMPVideoDecoderProxy* aGMP, + GMPVideoHost* aHost, + std::string* aErrorOut) +{ + if (!mInitting || !aGMP || !aHost) { + 
*aErrorOut = "GMP Decode: Either init was aborted, " + "or init failed to supply either a GMP decoder or GMP host."; + if (aGMP) { + // This could destroy us, since aGMP may be the last thing holding a ref + // Return immediately. + aGMP->Close(); + } + return WEBRTC_VIDEO_CODEC_ERROR; + } + + mInitting = false; + + if (mGMP && mGMP != aGMP) { + Close_g(); + } + + mGMP = aGMP; + mHost = aHost; + mCachedPluginId = mGMP->GetPluginId(); + // Bug XXXXXX: transfer settings from codecSettings to codec. + GMPVideoCodec codec; + memset(&codec, 0, sizeof(codec)); + codec.mGMPApiVersion = 33; + + // XXX this is currently a hack + //GMPVideoCodecUnion codecSpecific; + //memset(&codecSpecific, 0, sizeof(codecSpecific)); + nsTArray<uint8_t> codecSpecific; + nsresult rv = mGMP->InitDecode(codec, codecSpecific, this, 1); + if (NS_FAILED(rv)) { + *aErrorOut = "GMP Decode: InitDecode failed"; + return WEBRTC_VIDEO_CODEC_ERROR; + } + + return WEBRTC_VIDEO_CODEC_OK; +} + +void +WebrtcGmpVideoDecoder::Close_g() +{ + GMPVideoDecoderProxy* gmp(mGMP); + mGMP = nullptr; + mHost = nullptr; + mInitting = false; + + if (gmp) { + // Do this last, since this could cause us to be destroyed + gmp->Close(); + } +} + +int32_t +WebrtcGmpVideoDecoder::Decode(const webrtc::EncodedImage& aInputImage, + bool aMissingFrames, + const webrtc::RTPFragmentationHeader* aFragmentation, + const webrtc::CodecSpecificInfo* aCodecSpecificInfo, + int64_t aRenderTimeMs) +{ + int32_t ret; + MOZ_ASSERT(mGMPThread); + MOZ_ASSERT(!NS_IsMainThread()); + // Would be really nice to avoid this sync dispatch, but it would require a + // copy of the frame, since it doesn't appear to actually have a refcount. + mozilla::SyncRunnable::DispatchToThread(mGMPThread, + WrapRunnableRet(&ret, this, + &WebrtcGmpVideoDecoder::Decode_g, + aInputImage, + aMissingFrames, + aFragmentation, + aCodecSpecificInfo, + aRenderTimeMs)); + + return ret; +} + +int32_t +WebrtcGmpVideoDecoder::Decode_g(const webrtc::EncodedImage& aInputImage, + bool aMissingFrames, + const webrtc::RTPFragmentationHeader* aFragmentation, + const webrtc::CodecSpecificInfo* aCodecSpecificInfo, + int64_t aRenderTimeMs) +{ + if (!mGMP) { + // destroyed via Terminate(), failed to init, or just not initted yet + LOGD(("GMP Decode: not initted yet")); + return WEBRTC_VIDEO_CODEC_ERROR; + } + MOZ_ASSERT(mHost); + + if (!aInputImage._length) { + return WEBRTC_VIDEO_CODEC_ERROR; + } + + GMPVideoFrame* ftmp = nullptr; + GMPErr err = mHost->CreateFrame(kGMPEncodedVideoFrame, &ftmp); + if (err != GMPNoErr) { + return WEBRTC_VIDEO_CODEC_ERROR; + } + + GMPUniquePtr<GMPVideoEncodedFrame> frame(static_cast<GMPVideoEncodedFrame*>(ftmp)); + err = frame->CreateEmptyFrame(aInputImage._length); + if (err != GMPNoErr) { + return WEBRTC_VIDEO_CODEC_ERROR; + } + + // XXX At this point, we only will get mode1 data (a single length and a buffer) + // Session_info.cc/etc code needs to change to support mode 0. + *(reinterpret_cast<uint32_t*>(frame->Buffer())) = frame->Size(); + + // XXX It'd be wonderful not to have to memcpy the encoded data! 
+ memcpy(frame->Buffer()+4, aInputImage._buffer+4, frame->Size()-4); + + frame->SetEncodedWidth(aInputImage._encodedWidth); + frame->SetEncodedHeight(aInputImage._encodedHeight); + frame->SetTimeStamp((aInputImage._timeStamp * 1000ll)/90); // rounds down + frame->SetCompleteFrame(aInputImage._completeFrame); + frame->SetBufferType(GMP_BufferLength32); + + GMPVideoFrameType ft; + int32_t ret = WebrtcFrameTypeToGmpFrameType(aInputImage._frameType, &ft); + if (ret != WEBRTC_VIDEO_CODEC_OK) { + return ret; + } + + // Bug XXXXXX: Set codecSpecific info + GMPCodecSpecificInfo info; + memset(&info, 0, sizeof(info)); + info.mCodecType = kGMPVideoCodecH264; + info.mCodecSpecific.mH264.mSimulcastIdx = 0; + nsTArray<uint8_t> codecSpecificInfo; + codecSpecificInfo.AppendElements((uint8_t*)&info, sizeof(GMPCodecSpecificInfo)); + + LOGD(("GMP Decode: %llu, len %d", frame->TimeStamp(), aInputImage._length)); + nsresult rv = mGMP->Decode(Move(frame), + aMissingFrames, + codecSpecificInfo, + aRenderTimeMs); + if (NS_FAILED(rv)) { + return WEBRTC_VIDEO_CODEC_ERROR; + } + if(mDecoderStatus != GMPNoErr){ + mDecoderStatus = GMPNoErr; + return WEBRTC_VIDEO_CODEC_ERROR; + } + return WEBRTC_VIDEO_CODEC_OK; +} + +int32_t +WebrtcGmpVideoDecoder::RegisterDecodeCompleteCallback( webrtc::DecodedImageCallback* aCallback) +{ + MutexAutoLock lock(mCallbackMutex); + mCallback = aCallback; + + return WEBRTC_VIDEO_CODEC_OK; +} + + +/* static */ void +WebrtcGmpVideoDecoder::ReleaseGmp_g(RefPtr<WebrtcGmpVideoDecoder>& aDecoder) +{ + aDecoder->Close_g(); +} + +int32_t +WebrtcGmpVideoDecoder::ReleaseGmp() +{ + LOGD(("GMP Released:")); + RegisterDecodeCompleteCallback(nullptr); + + if (mGMPThread) { + mGMPThread->Dispatch( + WrapRunnableNM(&WebrtcGmpVideoDecoder::ReleaseGmp_g, + RefPtr<WebrtcGmpVideoDecoder>(this)), + NS_DISPATCH_NORMAL); + } + return WEBRTC_VIDEO_CODEC_OK; +} + +int32_t +WebrtcGmpVideoDecoder::Reset() +{ + // XXX ? + return WEBRTC_VIDEO_CODEC_OK; +} + +void +WebrtcGmpVideoDecoder::Terminated() +{ + LOGD(("GMP Decoder Terminated: %p", (void *)this)); + + mGMP->Close(); + mGMP = nullptr; + mHost = nullptr; + mInitting = false; + // Could now notify that it's dead +} + +void +WebrtcGmpVideoDecoder::Decoded(GMPVideoi420Frame* aDecodedFrame) +{ + MutexAutoLock lock(mCallbackMutex); + if (mCallback) { + webrtc::I420VideoFrame image; + int ret = image.CreateFrame(aDecodedFrame->Buffer(kGMPYPlane), + aDecodedFrame->Buffer(kGMPUPlane), + aDecodedFrame->Buffer(kGMPVPlane), + aDecodedFrame->Width(), + aDecodedFrame->Height(), + aDecodedFrame->Stride(kGMPYPlane), + aDecodedFrame->Stride(kGMPUPlane), + aDecodedFrame->Stride(kGMPVPlane)); + if (ret != 0) { + return; + } + image.set_timestamp((aDecodedFrame->Timestamp() * 90ll + 999)/1000); // round up + image.set_render_time_ms(0); + + LOGD(("GMP Decoded: %llu", aDecodedFrame->Timestamp())); + mCallback->Decoded(image); + } + aDecodedFrame->Destroy(); +} + +} diff --git a/media/webrtc/signaling/src/media-conduit/WebrtcGmpVideoCodec.h b/media/webrtc/signaling/src/media-conduit/WebrtcGmpVideoCodec.h new file mode 100644 index 000000000..0c01bf53c --- /dev/null +++ b/media/webrtc/signaling/src/media-conduit/WebrtcGmpVideoCodec.h @@ -0,0 +1,528 @@ +/* + * Copyright (c) 2012, The WebRTC project authors. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * * Neither the name of Google nor the names of its contributors may + * be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef WEBRTCGMPVIDEOCODEC_H_ +#define WEBRTCGMPVIDEOCODEC_H_ + +#include <iostream> +#include <queue> +#include <string> + +#include "nsThreadUtils.h" +#include "mozilla/Monitor.h" +#include "mozilla/Mutex.h" + +#include "mozIGeckoMediaPluginService.h" +#include "MediaConduitInterface.h" +#include "AudioConduit.h" +#include "VideoConduit.h" +#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h" + +#include "gmp-video-host.h" +#include "GMPVideoDecoderProxy.h" +#include "GMPVideoEncoderProxy.h" + +#include "PeerConnectionImpl.h" + +namespace mozilla { + +// Class that allows code on the other side of webrtc.org to tell +// WebrtcGmpVideoEncoder/Decoder what PC they should send errors to. +// This is necessary because webrtc.org gives us no way to plumb the handle +// through, nor does it give us any way to inform it of an error that will +// make it back to the PC that cares (except for errors encountered +// synchronously in functions like InitEncode/Decode, which will not happen +// because GMP init is async). +// Right now, this is used in MediaPipelineFactory. +class WebrtcGmpPCHandleSetter +{ + public: + explicit WebrtcGmpPCHandleSetter(const std::string& aPCHandle); + + ~WebrtcGmpPCHandleSetter(); + + static std::string GetCurrentHandle(); + + private: + static std::string sCurrentHandle; +}; + +class GmpInitDoneRunnable : public Runnable +{ + public: + explicit GmpInitDoneRunnable(const std::string& aPCHandle) : + mResult(WEBRTC_VIDEO_CODEC_OK), + mPCHandle(aPCHandle) + { + } + + NS_IMETHOD Run() override + { + if (mResult == WEBRTC_VIDEO_CODEC_OK) { + // Might be useful to notify the PeerConnection about successful init + // someday. 
+ return NS_OK; + } + + PeerConnectionWrapper wrapper(mPCHandle); + if (wrapper.impl()) { + wrapper.impl()->OnMediaError(mError); + } + return NS_OK; + } + + void Dispatch(int32_t aResult, const std::string& aError = "") + { + mResult = aResult; + mError = aError; + nsCOMPtr<nsIThread> mainThread(do_GetMainThread()); + if (mainThread) { + // For some reason, the compiler on CI is treating |this| as a const + // pointer, despite the fact that we're in a non-const function. And, + // interestingly enough, correcting this doesn't require a const_cast. + mainThread->Dispatch(do_AddRef(static_cast<nsIRunnable*>(this)), + NS_DISPATCH_NORMAL); + } + } + + int32_t Result() + { + return mResult; + } + + private: + int32_t mResult; + std::string mPCHandle; + std::string mError; +}; + +class WebrtcGmpVideoEncoder : public GMPVideoEncoderCallbackProxy +{ +public: + WebrtcGmpVideoEncoder(); + NS_INLINE_DECL_THREADSAFE_REFCOUNTING(WebrtcGmpVideoEncoder); + + // Implement VideoEncoder interface, sort of. + // (We cannot use |Release|, since that's needed for nsRefPtr) + virtual uint64_t PluginID() const + { + return mCachedPluginId; + } + + virtual int32_t InitEncode(const webrtc::VideoCodec* aCodecSettings, + int32_t aNumberOfCores, + uint32_t aMaxPayloadSize); + + virtual int32_t Encode(const webrtc::I420VideoFrame& aInputImage, + const webrtc::CodecSpecificInfo* aCodecSpecificInfo, + const std::vector<webrtc::VideoFrameType>* aFrameTypes); + + virtual int32_t RegisterEncodeCompleteCallback( + webrtc::EncodedImageCallback* aCallback); + + virtual int32_t ReleaseGmp(); + + virtual int32_t SetChannelParameters(uint32_t aPacketLoss, + int aRTT); + + virtual int32_t SetRates(uint32_t aNewBitRate, + uint32_t aFrameRate); + + // GMPVideoEncoderCallback virtual functions. 
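+  // Threading note (inferred from the .cpp, not a documented contract):
+  // these callbacks arrive on the GMP thread, so Encoded() only touches
+  // mCallback while holding mCallbackMutex, and Terminated() must assume
+  // the plugin can die at any time.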
+ virtual void Terminated() override; + + virtual void Encoded(GMPVideoEncodedFrame* aEncodedFrame, + const nsTArray<uint8_t>& aCodecSpecificInfo) override; + + virtual void Error(GMPErr aError) override { + } + +private: + virtual ~WebrtcGmpVideoEncoder(); + + static void InitEncode_g(const RefPtr<WebrtcGmpVideoEncoder>& aThis, + const GMPVideoCodec& aCodecParams, + int32_t aNumberOfCores, + uint32_t aMaxPayloadSize, + const RefPtr<GmpInitDoneRunnable>& aInitDone); + int32_t GmpInitDone(GMPVideoEncoderProxy* aGMP, GMPVideoHost* aHost, + const GMPVideoCodec& aCodecParams, + uint32_t aMaxPayloadSize, + std::string* aErrorOut); + int32_t GmpInitDone(GMPVideoEncoderProxy* aGMP, + GMPVideoHost* aHost, + std::string* aErrorOut); + int32_t InitEncoderForSize(unsigned short aWidth, + unsigned short aHeight, + std::string* aErrorOut); + static void ReleaseGmp_g(RefPtr<WebrtcGmpVideoEncoder>& aEncoder); + void Close_g(); + + class InitDoneCallback : public GetGMPVideoEncoderCallback + { + public: + InitDoneCallback(const RefPtr<WebrtcGmpVideoEncoder>& aEncoder, + const RefPtr<GmpInitDoneRunnable>& aInitDone, + const GMPVideoCodec& aCodecParams, + uint32_t aMaxPayloadSize) + : mEncoder(aEncoder), + mInitDone(aInitDone), + mCodecParams(aCodecParams), + mMaxPayloadSize(aMaxPayloadSize) + { + } + + virtual void Done(GMPVideoEncoderProxy* aGMP, GMPVideoHost* aHost) override + { + std::string errorOut; + int32_t result = mEncoder->GmpInitDone(aGMP, + aHost, + mCodecParams, + mMaxPayloadSize, + &errorOut); + + mInitDone->Dispatch(result, errorOut); + } + + private: + RefPtr<WebrtcGmpVideoEncoder> mEncoder; + RefPtr<GmpInitDoneRunnable> mInitDone; + GMPVideoCodec mCodecParams; + uint32_t mMaxPayloadSize; + }; + + int32_t Encode_g(const webrtc::I420VideoFrame* aInputImage, + const webrtc::CodecSpecificInfo* aCodecSpecificInfo, + const std::vector<webrtc::VideoFrameType>* aFrameTypes); + void RegetEncoderForResolutionChange( + uint32_t aWidth, + uint32_t aHeight, + const RefPtr<GmpInitDoneRunnable>& aInitDone); + + class InitDoneForResolutionChangeCallback : public GetGMPVideoEncoderCallback + { + public: + InitDoneForResolutionChangeCallback( + const RefPtr<WebrtcGmpVideoEncoder>& aEncoder, + const RefPtr<GmpInitDoneRunnable>& aInitDone, + uint32_t aWidth, + uint32_t aHeight) + : mEncoder(aEncoder), + mInitDone(aInitDone), + mWidth(aWidth), + mHeight(aHeight) + { + } + + virtual void Done(GMPVideoEncoderProxy* aGMP, GMPVideoHost* aHost) override + { + std::string errorOut; + int32_t result = mEncoder->GmpInitDone(aGMP, aHost, &errorOut); + if (result != WEBRTC_VIDEO_CODEC_OK) { + mInitDone->Dispatch(result, errorOut); + return; + } + + result = mEncoder->InitEncoderForSize(mWidth, mHeight, &errorOut); + mInitDone->Dispatch(result, errorOut); + } + + private: + RefPtr<WebrtcGmpVideoEncoder> mEncoder; + RefPtr<GmpInitDoneRunnable> mInitDone; + uint32_t mWidth; + uint32_t mHeight; + }; + + static int32_t SetRates_g(RefPtr<WebrtcGmpVideoEncoder> aThis, + uint32_t aNewBitRate, + uint32_t aFrameRate); + + nsCOMPtr<mozIGeckoMediaPluginService> mMPS; + nsCOMPtr<nsIThread> mGMPThread; + GMPVideoEncoderProxy* mGMP; + // Used to handle a race where Release() is called while init is in progress + bool mInitting; + GMPVideoHost* mHost; + GMPVideoCodec mCodecParams; + uint32_t mMaxPayloadSize; + webrtc::CodecSpecificInfo mCodecSpecificInfo; + // Protects mCallback + Mutex mCallbackMutex; + webrtc::EncodedImageCallback* mCallback; + uint64_t mCachedPluginId; + std::string mPCHandle; +}; + + +// Basically a strong ref to a 
WebrtcGmpVideoEncoder, that also translates +// from Release() to WebrtcGmpVideoEncoder::ReleaseGmp(), since we need +// WebrtcGmpVideoEncoder::Release() for managing the refcount. +// The webrtc.org code gets one of these, so it doesn't unilaterally delete +// the "real" encoder. +class WebrtcVideoEncoderProxy : public WebrtcVideoEncoder +{ + public: + WebrtcVideoEncoderProxy() : + mEncoderImpl(new WebrtcGmpVideoEncoder) + {} + + virtual ~WebrtcVideoEncoderProxy() + { + RegisterEncodeCompleteCallback(nullptr); + } + + uint64_t PluginID() const override + { + return mEncoderImpl->PluginID(); + } + + int32_t InitEncode(const webrtc::VideoCodec* aCodecSettings, + int32_t aNumberOfCores, + size_t aMaxPayloadSize) override + { + return mEncoderImpl->InitEncode(aCodecSettings, + aNumberOfCores, + aMaxPayloadSize); + } + + int32_t Encode( + const webrtc::I420VideoFrame& aInputImage, + const webrtc::CodecSpecificInfo* aCodecSpecificInfo, + const std::vector<webrtc::VideoFrameType>* aFrameTypes) override + { + return mEncoderImpl->Encode(aInputImage, + aCodecSpecificInfo, + aFrameTypes); + } + + int32_t RegisterEncodeCompleteCallback( + webrtc::EncodedImageCallback* aCallback) override + { + return mEncoderImpl->RegisterEncodeCompleteCallback(aCallback); + } + + int32_t Release() override + { + return mEncoderImpl->ReleaseGmp(); + } + + int32_t SetChannelParameters(uint32_t aPacketLoss, + int64_t aRTT) override + { + return mEncoderImpl->SetChannelParameters(aPacketLoss, aRTT); + } + + int32_t SetRates(uint32_t aNewBitRate, + uint32_t aFrameRate) override + { + return mEncoderImpl->SetRates(aNewBitRate, aFrameRate); + } + + private: + RefPtr<WebrtcGmpVideoEncoder> mEncoderImpl; +}; + +class WebrtcGmpVideoDecoder : public GMPVideoDecoderCallbackProxy +{ +public: + WebrtcGmpVideoDecoder(); + NS_INLINE_DECL_THREADSAFE_REFCOUNTING(WebrtcGmpVideoDecoder); + + // Implement VideoEncoder interface, sort of. 
+  // (We cannot use |Release|, since that's needed for nsRefPtr)
+  virtual uint64_t PluginID() const
+  {
+    return mCachedPluginId;
+  }
+
+  virtual int32_t InitDecode(const webrtc::VideoCodec* aCodecSettings,
+                             int32_t aNumberOfCores);
+  virtual int32_t Decode(const webrtc::EncodedImage& aInputImage,
+                         bool aMissingFrames,
+                         const webrtc::RTPFragmentationHeader* aFragmentation,
+                         const webrtc::CodecSpecificInfo* aCodecSpecificInfo,
+                         int64_t aRenderTimeMs);
+  virtual int32_t RegisterDecodeCompleteCallback(webrtc::DecodedImageCallback* aCallback);
+
+  virtual int32_t ReleaseGmp();
+
+  virtual int32_t Reset();
+
+  // GMPVideoDecoderCallbackProxy
+  virtual void Terminated() override;
+
+  virtual void Decoded(GMPVideoi420Frame* aDecodedFrame) override;
+
+  virtual void ReceivedDecodedReferenceFrame(const uint64_t aPictureId) override {
+    MOZ_CRASH();
+  }
+
+  virtual void ReceivedDecodedFrame(const uint64_t aPictureId) override {
+    MOZ_CRASH();
+  }
+
+  virtual void InputDataExhausted() override {
+  }
+
+  virtual void DrainComplete() override {
+  }
+
+  virtual void ResetComplete() override {
+  }
+
+  virtual void Error(GMPErr aError) override {
+    mDecoderStatus = aError;
+  }
+
+private:
+  virtual ~WebrtcGmpVideoDecoder();
+
+  static void InitDecode_g(
+      const RefPtr<WebrtcGmpVideoDecoder>& aThis,
+      const webrtc::VideoCodec* aCodecSettings,
+      int32_t aNumberOfCores,
+      const RefPtr<GmpInitDoneRunnable>& aInitDone);
+  int32_t GmpInitDone(GMPVideoDecoderProxy* aGMP,
+                      GMPVideoHost* aHost,
+                      std::string* aErrorOut);
+  static void ReleaseGmp_g(RefPtr<WebrtcGmpVideoDecoder>& aDecoder);
+  void Close_g();
+
+  class InitDoneCallback : public GetGMPVideoDecoderCallback
+  {
+  public:
+    explicit InitDoneCallback(const RefPtr<WebrtcGmpVideoDecoder>& aDecoder,
+                              const RefPtr<GmpInitDoneRunnable>& aInitDone)
+      : mDecoder(aDecoder),
+        mInitDone(aInitDone)
+    {
+    }
+
+    virtual void Done(GMPVideoDecoderProxy* aGMP, GMPVideoHost* aHost)
+    {
+      std::string errorOut;
+      int32_t result = mDecoder->GmpInitDone(aGMP, aHost, &errorOut);
+
+      mInitDone->Dispatch(result, errorOut);
+    }
+
+  private:
+    RefPtr<WebrtcGmpVideoDecoder> mDecoder;
+    RefPtr<GmpInitDoneRunnable> mInitDone;
+  };
+
+  virtual int32_t Decode_g(const webrtc::EncodedImage& aInputImage,
+                           bool aMissingFrames,
+                           const webrtc::RTPFragmentationHeader* aFragmentation,
+                           const webrtc::CodecSpecificInfo* aCodecSpecificInfo,
+                           int64_t aRenderTimeMs);
+
+  nsCOMPtr<mozIGeckoMediaPluginService> mMPS;
+  nsCOMPtr<nsIThread> mGMPThread;
+  GMPVideoDecoderProxy* mGMP; // Addref is held for us
+  // Used to handle a race where Release() is called while init is in progress
+  bool mInitting;
+  GMPVideoHost* mHost;
+  // Protects mCallback
+  Mutex mCallbackMutex;
+  webrtc::DecodedImageCallback* mCallback;
+  Atomic<uint64_t> mCachedPluginId;
+  GMPErr mDecoderStatus;
+  std::string mPCHandle;
+};
+
+// Basically a strong ref to a WebrtcGmpVideoDecoder, that also translates
+// from Release() to WebrtcGmpVideoDecoder::ReleaseGmp(), since we need
+// WebrtcGmpVideoDecoder::Release() for managing the refcount.
+// The webrtc.org code gets one of these, so it doesn't unilaterally delete
+// the "real" decoder.
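+//
+// A sketch of how webrtc.org consumes the proxy (the registration call is
+// illustrative, not taken from this file): webrtc.org assumes it owns the
+// decoder it is handed and will eventually call Release() on it; giving it
+// the proxy keeps the refcounted WebrtcGmpVideoDecoder alive until both
+// webrtc.org and the GMP thread are done with it.
+//
+//   WebrtcVideoDecoderProxy* proxy = new WebrtcVideoDecoderProxy();
+//   externalCodec->RegisterExternalReceiveCodec(channel, payloadType, proxy);
+//   ...
+//   proxy->Release(); // webrtc.org side; forwards to ReleaseGmp()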
+class WebrtcVideoDecoderProxy : public WebrtcVideoDecoder
+{
+  public:
+    WebrtcVideoDecoderProxy() :
+      mDecoderImpl(new WebrtcGmpVideoDecoder)
+    {}
+
+    virtual ~WebrtcVideoDecoderProxy()
+    {
+      RegisterDecodeCompleteCallback(nullptr);
+    }
+
+    uint64_t PluginID() const override
+    {
+      return mDecoderImpl->PluginID();
+    }
+
+    int32_t InitDecode(const webrtc::VideoCodec* aCodecSettings,
+                       int32_t aNumberOfCores) override
+    {
+      return mDecoderImpl->InitDecode(aCodecSettings, aNumberOfCores);
+    }
+
+    int32_t Decode(
+        const webrtc::EncodedImage& aInputImage,
+        bool aMissingFrames,
+        const webrtc::RTPFragmentationHeader* aFragmentation,
+        const webrtc::CodecSpecificInfo* aCodecSpecificInfo,
+        int64_t aRenderTimeMs) override
+    {
+      return mDecoderImpl->Decode(aInputImage,
+                                  aMissingFrames,
+                                  aFragmentation,
+                                  aCodecSpecificInfo,
+                                  aRenderTimeMs);
+    }
+
+    int32_t RegisterDecodeCompleteCallback(
+        webrtc::DecodedImageCallback* aCallback) override
+    {
+      return mDecoderImpl->RegisterDecodeCompleteCallback(aCallback);
+    }
+
+    int32_t Release() override
+    {
+      return mDecoderImpl->ReleaseGmp();
+    }
+
+    int32_t Reset() override
+    {
+      return mDecoderImpl->Reset();
+    }
+
+  private:
+    RefPtr<WebrtcGmpVideoDecoder> mDecoderImpl;
+};
+
+}
+
+#endif
diff --git a/media/webrtc/signaling/src/media-conduit/WebrtcMediaCodecVP8VideoCodec.cpp b/media/webrtc/signaling/src/media-conduit/WebrtcMediaCodecVP8VideoCodec.cpp
new file mode 100644
index 000000000..27b99d5ed
--- /dev/null
+++ b/media/webrtc/signaling/src/media-conduit/WebrtcMediaCodecVP8VideoCodec.cpp
@@ -0,0 +1,1004 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <cstdio>
+#include <iostream>
+#include <queue>
+
+#include "CSFLog.h"
+#include "nspr.h"
+
+#include "MediaCodec.h"
+#include "WebrtcMediaCodecVP8VideoCodec.h"
+#include "AndroidJNIWrapper.h"
+#include "mozilla/ArrayUtils.h"
+#include "nsThreadUtils.h"
+#include "mozilla/Monitor.h"
+#include "runnable_utils.h"
+
+#include "AudioConduit.h"
+#include "VideoConduit.h"
+#include "libyuv/convert_from.h"
+#include "libyuv/convert.h"
+#include "libyuv/row.h"
+
+#include <webrtc/common_video/libyuv/include/webrtc_libyuv.h>
+
+using namespace mozilla;
+using namespace mozilla::java::sdk;
+
+static const int32_t DECODER_TIMEOUT = 10 * PR_USEC_PER_MSEC; // 10ms
+static const char MEDIACODEC_VIDEO_MIME_VP8[] = "video/x-vnd.on2.vp8";
+
+namespace mozilla {
+
+static const char* logTag = "WebrtcMediaCodecVP8VideoCodec";
+
+static MediaCodec::LocalRef CreateDecoder(const char* aMimeType)
+{
+  if (!aMimeType) {
+    return nullptr;
+  }
+
+  MediaCodec::LocalRef codec;
+  MediaCodec::CreateDecoderByType(aMimeType, &codec);
+  return codec;
+}
+
+static MediaCodec::LocalRef CreateEncoder(const char* aMimeType)
+{
+  if (!aMimeType) {
+    return nullptr;
+  }
+
+  MediaCodec::LocalRef codec;
+  MediaCodec::CreateEncoderByType(aMimeType, &codec);
+  return codec;
+}
+
+static void
+ShutdownThread(nsCOMPtr<nsIThread>& aThread)
+{
+  aThread->Shutdown();
+}
+
+// Base runnable class to repeatedly pull MediaCodec output buffers in a
+// separate thread.
+// How to use:
+// - implement DrainOutput() to get output. Remember to return false to tell
+//   the drain not to pop the input queue.
+// - call QueueInput() to schedule a run to drain output. The input, aFrame,
+//   should contain corresponding info such as image size and timestamps for
+//   the DrainOutput() implementation to construct data needed by
+//   encoded/decoded callbacks.
+class MediaCodecOutputDrain : public Runnable
+{
+public:
+  void Start() {
+    MonitorAutoLock lock(mMonitor);
+    if (mThread == nullptr) {
+      NS_NewNamedThread("OutputDrain", getter_AddRefs(mThread));
+    }
+    mEnding = false;
+    mThread->Dispatch(this, NS_DISPATCH_NORMAL);
+  }
+
+  void Stop() {
+    MonitorAutoLock lock(mMonitor);
+    mEnding = true;
+    lock.NotifyAll(); // In case Run() is waiting.
+
+    if (mThread != nullptr) {
+      MonitorAutoUnlock unlock(mMonitor);
+      NS_DispatchToMainThread(
+        WrapRunnableNM<decltype(&ShutdownThread),
+                       nsCOMPtr<nsIThread> >(&ShutdownThread, mThread));
+      mThread = nullptr;
+    }
+  }
+
+  void QueueInput(const EncodedFrame& aFrame)
+  {
+    MonitorAutoLock lock(mMonitor);
+
+    MOZ_ASSERT(mThread);
+
+    mInputFrames.push(aFrame);
+    // Notify Run() about queued input so it can start working.
+    lock.NotifyAll();
+  }
+
+  NS_IMETHOD Run() override
+  {
+    MOZ_ASSERT(mThread);
+
+    MonitorAutoLock lock(mMonitor);
+    while (true) {
+      if (mInputFrames.empty()) {
+        // Wait for new input.
+        lock.Wait();
+      }
+
+      if (mEnding) {
+        // Stop draining.
+        break;
+      }
+
+      MOZ_ASSERT(!mInputFrames.empty());
+      {
+        // Release monitor while draining because it's blocking.
+        MonitorAutoUnlock unlock(mMonitor);
+        DrainOutput();
+      }
+    }
+
+    return NS_OK;
+  }
+
+protected:
+  MediaCodecOutputDrain()
+    : mMonitor("MediaCodecOutputDrain monitor")
+    , mEnding(false)
+  {}
+
+  // Drain output buffer for input frame queue mInputFrames.
+  // mInputFrames contains info such as size and time of the input frames.
+  // We have to give a queue to handle encoder frame skips - we can input 10
+  // frames and get one back.  NOTE: any access of aInputFrames MUST be
+  // preceded by locking mMonitor!
+
+  // Blocks waiting for decoded buffers, but for a limited period because
+  // we need to check for shutdown.
+  virtual bool DrainOutput() = 0;
+
+protected:
+  // This monitor protects all things below it, and is also used to
+  // wait/notify queued input.
+  Monitor mMonitor;
+  std::queue<EncodedFrame> mInputFrames;
+
+private:
+  // also protected by mMonitor
+  nsCOMPtr<nsIThread> mThread;
+  bool mEnding;
+};
+
+class WebrtcAndroidMediaCodec {
+public:
+  WebrtcAndroidMediaCodec()
+    : mEncoderCallback(nullptr)
+    , mDecoderCallback(nullptr)
+    , isStarted(false)
+    , mEnding(false) {
+    CSFLogDebug(logTag, "%s ", __FUNCTION__);
+  }
+
+  nsresult Configure(uint32_t width,
+                     uint32_t height,
+                     const jobject aSurface,
+                     uint32_t flags,
+                     const char* mime,
+                     bool encoder) {
+    CSFLogDebug(logTag, "%s ", __FUNCTION__);
+    nsresult res = NS_OK;
+
+    if (!mCoder) {
+      mWidth = width;
+      mHeight = height;
+
+      MediaFormat::LocalRef format;
+
+      res = MediaFormat::CreateVideoFormat(nsCString(mime),
+                                           mWidth,
+                                           mHeight,
+                                           &format);
+
+      if (NS_FAILED(res)) {
+        CSFLogDebug(logTag, "WebrtcAndroidMediaCodec::%s, CreateVideoFormat failed err = %d", __FUNCTION__, (int)res);
+        return NS_ERROR_FAILURE;
+      }
+
+      if (encoder) {
+        mCoder = CreateEncoder(mime);
+
+        // Check the codec itself; the previous check of |res| here tested a
+        // stale value and could never catch a CreateEncoderByType failure.
+        if (!mCoder) {
+          CSFLogDebug(logTag, "WebrtcAndroidMediaCodec::%s, CreateEncoderByType failed", __FUNCTION__);
+          return NS_ERROR_FAILURE;
+        }
+
+        res = format->SetInteger(nsCString("bitrate"), 1000*300);
+        res = format->SetInteger(nsCString("bitrate-mode"), 2);
+        res = format->SetInteger(nsCString("color-format"), 21);
+        res = format->SetInteger(nsCString("frame-rate"), 30);
+        res = format->SetInteger(nsCString("i-frame-interval"), 100);
+
+      } else {
+        mCoder = CreateDecoder(mime);
+        if (!mCoder) {
+          CSFLogDebug(logTag, "WebrtcAndroidMediaCodec::%s, CreateDecoderByType failed", __FUNCTION__);
+          return NS_ERROR_FAILURE;
+        }
+      }
+      res = mCoder->Configure(format, nullptr, nullptr, flags);
+      if (NS_FAILED(res)) {
+        CSFLogDebug(logTag, "WebrtcAndroidMediaCodec::%s, err = %d", __FUNCTION__, (int)res);
+      }
+    }
+
+    return res;
+  }
+
+  nsresult Start() {
+    CSFLogDebug(logTag, "%s ", __FUNCTION__);
+
+    if (!mCoder) {
+      return NS_ERROR_FAILURE;
+    }
+
+    mEnding = false;
+
+    nsresult res;
+    res = mCoder->Start();
+    if (NS_FAILED(res)) {
+      CSFLogDebug(logTag, "WebrtcAndroidMediaCodec::%s, mCoder->start() return err = %d",
+                  __FUNCTION__, (int)res);
+      return res;
+    }
+    isStarted = true;
+    return NS_OK;
+  }
+
+  nsresult Stop() {
+    CSFLogDebug(logTag, "%s ", __FUNCTION__);
+    mEnding = true;
+
+    if (mOutputDrain != nullptr) {
+      mOutputDrain->Stop();
+      mOutputDrain = nullptr;
+    }
+
+    mCoder->Stop();
+    mCoder->Release();
+    isStarted = false;
+    return NS_OK;
+  }
+
+  void GenerateVideoFrame(
+      size_t width, size_t height, uint32_t timeStamp,
+      void* decoded,
+      webrtc::I420VideoFrame* videoFrame, int color_format) {
+
+    CSFLogDebug(logTag, "%s ", __FUNCTION__);
+
+    // TODO: eliminate extra pixel copy/color conversion
+    size_t widthUV = (width + 1) / 2;
+    if (videoFrame->CreateEmptyFrame(width, height, width, widthUV, widthUV)) {
+      return;
+    }
+
+    uint8_t* src_nv12 = static_cast<uint8_t *>(decoded);
+    int src_nv12_y_size = width * height;
+
+    uint8_t* dstY = videoFrame->buffer(webrtc::kYPlane);
+    uint8_t* dstU = videoFrame->buffer(webrtc::kUPlane);
+    uint8_t* dstV = videoFrame->buffer(webrtc::kVPlane);
+
+    libyuv::NV12ToI420(src_nv12, width,
+                       src_nv12 + src_nv12_y_size, (width + 1) & ~1,
+                       dstY, width,
+                       dstU, (width + 1) / 2,
+                       dstV, (width + 1) / 2,
+                       width, height);
+
+    videoFrame->set_timestamp(timeStamp);
+  }
+
+  int32_t
+  FeedMediaCodecInput(
+      const webrtc::EncodedImage& inputImage,
+      int64_t renderTimeMs) {
+
+#ifdef WEBRTC_MEDIACODEC_DEBUG
+    uint32_t time = PR_IntervalNow();
CSFLogDebug(logTag, "%s ", __FUNCTION__); +#endif + + int inputIndex = DequeueInputBuffer(DECODER_TIMEOUT); + if (inputIndex == -1) { + CSFLogError(logTag, "%s equeue input buffer failed", __FUNCTION__); + return inputIndex; + } + +#ifdef WEBRTC_MEDIACODEC_DEBUG + CSFLogDebug(logTag, "%s dequeue input buffer took %u ms", __FUNCTION__, PR_IntervalToMilliseconds(PR_IntervalNow()-time)); + time = PR_IntervalNow(); +#endif + + size_t size = inputImage._length; + + JNIEnv* env = jsjni_GetJNIForThread(); + jobject buffer = env->GetObjectArrayElement(mInputBuffers, inputIndex); + void* directBuffer = env->GetDirectBufferAddress(buffer); + + PodCopy((uint8_t*)directBuffer, inputImage._buffer, size); + + if (inputIndex >= 0) { + CSFLogError(logTag, "%s queue input buffer inputIndex = %d", __FUNCTION__, inputIndex); + QueueInputBuffer(inputIndex, 0, size, renderTimeMs, 0); + + { + if (mOutputDrain == nullptr) { + mOutputDrain = new OutputDrain(this); + mOutputDrain->Start(); + } + EncodedFrame frame; + frame.width_ = mWidth; + frame.height_ = mHeight; + frame.timeStamp_ = inputImage._timeStamp; + frame.decode_timestamp_ = renderTimeMs; + mOutputDrain->QueueInput(frame); + } + env->DeleteLocalRef(buffer); + } + + return inputIndex; + } + + nsresult + DrainOutput(std::queue<EncodedFrame>& aInputFrames, Monitor& aMonitor) { + MOZ_ASSERT(mCoder != nullptr); + if (mCoder == nullptr) { + return NS_ERROR_FAILURE; + } + +#ifdef WEBRTC_MEDIACODEC_DEBUG + uint32_t time = PR_IntervalNow(); +#endif + nsresult res; + BufferInfo::LocalRef bufferInfo; + res = BufferInfo::New(&bufferInfo); + if (NS_FAILED(res)) { + CSFLogDebug(logTag, "WebrtcAndroidMediaCodec::%s, BufferInfo::New return err = %d", + __FUNCTION__, (int)res); + return res; + } + int32_t outputIndex = DequeueOutputBuffer(bufferInfo); + + if (outputIndex == MediaCodec::INFO_TRY_AGAIN_LATER) { + // Not an error: output not available yet. Try later. 
+ CSFLogDebug(logTag, "%s dequeue output buffer try again:%d", __FUNCTION__, outputIndex); + } else if (outputIndex == MediaCodec::INFO_OUTPUT_FORMAT_CHANGED) { + // handle format change + CSFLogDebug(logTag, "%s dequeue output buffer format changed:%d", __FUNCTION__, outputIndex); + } else if (outputIndex == MediaCodec::INFO_OUTPUT_BUFFERS_CHANGED) { + CSFLogDebug(logTag, "%s dequeue output buffer changed:%d", __FUNCTION__, outputIndex); + GetOutputBuffers(); + } else if (outputIndex < 0) { + CSFLogDebug(logTag, "%s dequeue output buffer unknow error:%d", __FUNCTION__, outputIndex); + MonitorAutoLock lock(aMonitor); + aInputFrames.pop(); + } else { +#ifdef WEBRTC_MEDIACODEC_DEBUG + CSFLogDebug(logTag, "%s dequeue output buffer# return status is %d took %u ms", __FUNCTION__, outputIndex, PR_IntervalToMilliseconds(PR_IntervalNow()-time)); +#endif + EncodedFrame frame; + { + MonitorAutoLock lock(aMonitor); + frame = aInputFrames.front(); + aInputFrames.pop(); + } + + if (mEnding) { + ReleaseOutputBuffer(outputIndex, false); + return NS_OK; + } + + JNIEnv* env = jsjni_GetJNIForThread(); + jobject buffer = env->GetObjectArrayElement(mOutputBuffers, outputIndex); + if (buffer) { + // The buffer will be null on Android L if we are decoding to a Surface + void* directBuffer = env->GetDirectBufferAddress(buffer); + + int color_format = 0; + + CSFLogDebug(logTag, "%s generate video frame, width = %d, height = %d, timeStamp_ = %d", __FUNCTION__, frame.width_, frame.height_, frame.timeStamp_); + GenerateVideoFrame(frame.width_, frame.height_, frame.timeStamp_, directBuffer, &mVideoFrame, color_format); + mDecoderCallback->Decoded(mVideoFrame); + + ReleaseOutputBuffer(outputIndex, false); + env->DeleteLocalRef(buffer); + } + } + return NS_OK; + } + + int32_t DequeueInputBuffer(int64_t time) { + nsresult res; + int32_t inputIndex; + res = mCoder->DequeueInputBuffer(time, &inputIndex); + + if (NS_FAILED(res)) { + CSFLogDebug(logTag, "WebrtcAndroidMediaCodec::%s, mCoder->DequeueInputBuffer() return err = %d", + __FUNCTION__, (int)res); + return -1; + } + return inputIndex; + } + + void QueueInputBuffer(int32_t inputIndex, int32_t offset, size_t size, int64_t renderTimes, int32_t flags) { + nsresult res = NS_OK; + res = mCoder->QueueInputBuffer(inputIndex, offset, size, renderTimes, flags); + + if (NS_FAILED(res)) { + CSFLogDebug(logTag, "WebrtcAndroidMediaCodec::%s, mCoder->QueueInputBuffer() return err = %d", + __FUNCTION__, (int)res); + } + } + + int32_t DequeueOutputBuffer(BufferInfo::Param aInfo) { + nsresult res; + + int32_t outputStatus; + res = mCoder->DequeueOutputBuffer(aInfo, DECODER_TIMEOUT, &outputStatus); + + if (NS_FAILED(res)) { + CSFLogDebug(logTag, "WebrtcAndroidMediaCodec::%s, mCoder->DequeueOutputBuffer() return err = %d", + __FUNCTION__, (int)res); + return -1; + } + + return outputStatus; + } + + void ReleaseOutputBuffer(int32_t index, bool flag) { + mCoder->ReleaseOutputBuffer(index, flag); + } + + jobjectArray GetInputBuffers() { + JNIEnv* env = jsjni_GetJNIForThread(); + + if (mInputBuffers) { + env->DeleteGlobalRef(mInputBuffers); + } + + nsresult res; + jni::ObjectArray::LocalRef inputBuffers; + res = mCoder->GetInputBuffers(&inputBuffers); + mInputBuffers = (jobjectArray) env->NewGlobalRef(inputBuffers.Get()); + if (NS_FAILED(res)) { + CSFLogDebug(logTag, "WebrtcAndroidMediaCodec::%s, GetInputBuffers return err = %d", + __FUNCTION__, (int)res); + return nullptr; + } + + return mInputBuffers; + } + + jobjectArray GetOutputBuffers() { + JNIEnv* env = jsjni_GetJNIForThread(); + + 
+    if (mOutputBuffers) {
+      env->DeleteGlobalRef(mOutputBuffers);
+    }
+
+    nsresult res;
+    jni::ObjectArray::LocalRef outputBuffers;
+    res = mCoder->GetOutputBuffers(&outputBuffers);
+    if (NS_FAILED(res)) {
+      CSFLogDebug(logTag, "WebrtcAndroidMediaCodec::%s, GetOutputBuffers return err = %d",
+                  __FUNCTION__, (int)res);
+      return nullptr;
+    }
+    mOutputBuffers = (jobjectArray) env->NewGlobalRef(outputBuffers.Get());
+
+    return mOutputBuffers;
+  }
+
+  void SetDecoderCallback(webrtc::DecodedImageCallback* aCallback) {
+    mDecoderCallback = aCallback;
+  }
+
+  void SetEncoderCallback(webrtc::EncodedImageCallback* aCallback) {
+    mEncoderCallback = aCallback;
+  }
+
+protected:
+  virtual ~WebrtcAndroidMediaCodec() {
+  }
+
+private:
+  class OutputDrain : public MediaCodecOutputDrain
+  {
+  public:
+    OutputDrain(WebrtcAndroidMediaCodec* aMediaCodec)
+      : MediaCodecOutputDrain()
+      , mMediaCodec(aMediaCodec)
+    {}
+
+  protected:
+    virtual bool DrainOutput() override
+    {
+      return (mMediaCodec->DrainOutput(mInputFrames, mMonitor) == NS_OK);
+    }
+
+  private:
+    WebrtcAndroidMediaCodec* mMediaCodec;
+  };
+
+  friend class WebrtcMediaCodecVP8VideoEncoder;
+  friend class WebrtcMediaCodecVP8VideoDecoder;
+
+  MediaCodec::GlobalRef mCoder;
+  webrtc::EncodedImageCallback* mEncoderCallback;
+  webrtc::DecodedImageCallback* mDecoderCallback;
+  webrtc::I420VideoFrame mVideoFrame;
+
+  jobjectArray mInputBuffers;
+  jobjectArray mOutputBuffers;
+
+  RefPtr<OutputDrain> mOutputDrain;
+  uint32_t mWidth;
+  uint32_t mHeight;
+  bool isStarted;
+  bool mEnding;
+
+  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(WebrtcAndroidMediaCodec)
+};
+
+static bool I420toNV12(uint8_t* dstY, uint16_t* dstUV, const webrtc::I420VideoFrame& inputImage) {
+  uint8_t* buffer = dstY;
+  uint8_t* dst_y = buffer;
+  int dst_stride_y = inputImage.stride(webrtc::kYPlane);
+  uint8_t* dst_uv = buffer + inputImage.stride(webrtc::kYPlane) *
+                    inputImage.height();
+  int dst_stride_uv = inputImage.stride(webrtc::kUPlane) * 2;
+
+  // Why NV12? Because COLOR_FORMAT_YUV420_SEMIPLANAR. Most hardware is NV12-friendly.
+  bool converted = !libyuv::I420ToNV12(inputImage.buffer(webrtc::kYPlane),
+                                       inputImage.stride(webrtc::kYPlane),
+                                       inputImage.buffer(webrtc::kUPlane),
+                                       inputImage.stride(webrtc::kUPlane),
+                                       inputImage.buffer(webrtc::kVPlane),
+                                       inputImage.stride(webrtc::kVPlane),
+                                       dst_y,
+                                       dst_stride_y,
+                                       dst_uv,
+                                       dst_stride_uv,
+                                       inputImage.width(),
+                                       inputImage.height());
+  return converted;
+}
+
+// Encoder.
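+// Input-format note for the encoder below (an illustrative summary that
+// matches the I420toNV12() helper above): MediaCodec is configured with
+// color-format 21 (COLOR_FormatYUV420SemiPlanar), i.e. NV12-style memory:
+//
+//   [Y plane: width * height bytes][interleaved UV pairs: width * height / 2]
+//
+// so each input buffer is filled with the full Y plane first, then the
+// packed UV plane.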
+WebrtcMediaCodecVP8VideoEncoder::WebrtcMediaCodecVP8VideoEncoder() + : mTimestamp(0) + , mCallback(nullptr) + , mMediaCodecEncoder(nullptr) { + CSFLogDebug(logTag, "%s ", __FUNCTION__); + + memset(&mEncodedImage, 0, sizeof(mEncodedImage)); +} + +bool WebrtcMediaCodecVP8VideoEncoder::ResetInputBuffers() { + mInputBuffers = mMediaCodecEncoder->GetInputBuffers(); + + if (!mInputBuffers) + return false; + + return true; +} + +bool WebrtcMediaCodecVP8VideoEncoder::ResetOutputBuffers() { + mOutputBuffers = mMediaCodecEncoder->GetOutputBuffers(); + + if (!mOutputBuffers) + return false; + + return true; +} + +int32_t +WebrtcMediaCodecVP8VideoEncoder::VerifyAndAllocate(const uint32_t minimumSize) +{ + if(minimumSize > mEncodedImage._size) + { + // create buffer of sufficient size + uint8_t* newBuffer = new uint8_t[minimumSize]; + if (newBuffer == nullptr) { + return -1; + } + if(mEncodedImage._buffer) { + // copy old data + memcpy(newBuffer, mEncodedImage._buffer, mEncodedImage._size); + delete [] mEncodedImage._buffer; + } + mEncodedImage._buffer = newBuffer; + mEncodedImage._size = minimumSize; + } + return 0; +} + +int32_t WebrtcMediaCodecVP8VideoEncoder::InitEncode( + const webrtc::VideoCodec* codecSettings, + int32_t numberOfCores, + size_t maxPayloadSize) { + mMaxPayloadSize = maxPayloadSize; + CSFLogDebug(logTag, "%s, w = %d, h = %d", __FUNCTION__, codecSettings->width, codecSettings->height); + + return WEBRTC_VIDEO_CODEC_OK; +} + +int32_t WebrtcMediaCodecVP8VideoEncoder::Encode( + const webrtc::I420VideoFrame& inputImage, + const webrtc::CodecSpecificInfo* codecSpecificInfo, + const std::vector<webrtc::VideoFrameType>* frame_types) { + CSFLogDebug(logTag, "%s, w = %d, h = %d", __FUNCTION__, inputImage.width(), inputImage.height()); + + if (!mMediaCodecEncoder) { + mMediaCodecEncoder = new WebrtcAndroidMediaCodec(); + } + + if (!mMediaCodecEncoder->isStarted) { + if (inputImage.width() == 0 || inputImage.height() == 0) { + return WEBRTC_VIDEO_CODEC_ERROR; + } else { + mFrameWidth = inputImage.width(); + mFrameHeight = inputImage.height(); + } + + mMediaCodecEncoder->SetEncoderCallback(mCallback); + nsresult res = mMediaCodecEncoder->Configure(mFrameWidth, mFrameHeight, nullptr, MediaCodec::CONFIGURE_FLAG_ENCODE, MEDIACODEC_VIDEO_MIME_VP8, true /* encoder */); + + if (res != NS_OK) { + CSFLogDebug(logTag, "%s, encoder configure return err = %d", + __FUNCTION__, (int)res); + return WEBRTC_VIDEO_CODEC_ERROR; + } + + res = mMediaCodecEncoder->Start(); + + if (NS_FAILED(res)) { + mMediaCodecEncoder->isStarted = false; + CSFLogDebug(logTag, "%s start encoder. 
err = %d", __FUNCTION__, (int)res); + return WEBRTC_VIDEO_CODEC_ERROR; + } + + bool retBool = ResetInputBuffers(); + if (!retBool) { + CSFLogDebug(logTag, "%s ResetInputBuffers failed.", __FUNCTION__); + return WEBRTC_VIDEO_CODEC_ERROR; + } + retBool = ResetOutputBuffers(); + if (!retBool) { + CSFLogDebug(logTag, "%s ResetOutputBuffers failed.", __FUNCTION__); + return WEBRTC_VIDEO_CODEC_ERROR; + } + + mMediaCodecEncoder->isStarted = true; + } + +#ifdef WEBRTC_MEDIACODEC_DEBUG + uint32_t time = PR_IntervalNow(); +#endif + + size_t sizeY = inputImage.allocated_size(webrtc::kYPlane); + size_t sizeUV = inputImage.allocated_size(webrtc::kUPlane); + size_t size = sizeY + 2 * sizeUV; + + int inputIndex = mMediaCodecEncoder->DequeueInputBuffer(DECODER_TIMEOUT); + if (inputIndex == -1) { + CSFLogError(logTag, "%s dequeue input buffer failed", __FUNCTION__); + return inputIndex; + } + +#ifdef WEBRTC_MEDIACODEC_DEBUG + CSFLogDebug(logTag, "%s WebrtcMediaCodecVP8VideoEncoder::Encode() dequeue OMX input buffer took %u ms", __FUNCTION__, PR_IntervalToMilliseconds(PR_IntervalNow()-time)); +#endif + + if (inputIndex >= 0) { + JNIEnv* env = jsjni_GetJNIForThread(); + jobject buffer = env->GetObjectArrayElement(mInputBuffers, inputIndex); + void* directBuffer = env->GetDirectBufferAddress(buffer); + + uint8_t* dstY = static_cast<uint8_t*>(directBuffer); + uint16_t* dstUV = reinterpret_cast<uint16_t*>(dstY + sizeY); + + bool converted = I420toNV12(dstY, dstUV, inputImage); + if (!converted) { + CSFLogError(logTag, "%s WebrtcMediaCodecVP8VideoEncoder::Encode() convert input buffer to NV12 error.", __FUNCTION__); + return WEBRTC_VIDEO_CODEC_ERROR; + } + + env->DeleteLocalRef(buffer); + +#ifdef WEBRTC_MEDIACODEC_DEBUG + time = PR_IntervalNow(); + CSFLogError(logTag, "%s queue input buffer inputIndex = %d", __FUNCTION__, inputIndex); +#endif + + mMediaCodecEncoder->QueueInputBuffer(inputIndex, 0, size, inputImage.render_time_ms() * PR_USEC_PER_MSEC /* ms to us */, 0); +#ifdef WEBRTC_MEDIACODEC_DEBUG + CSFLogDebug(logTag, "%s WebrtcMediaCodecVP8VideoEncoder::Encode() queue input buffer took %u ms", __FUNCTION__, PR_IntervalToMilliseconds(PR_IntervalNow()-time)); +#endif + mEncodedImage._encodedWidth = inputImage.width(); + mEncodedImage._encodedHeight = inputImage.height(); + mEncodedImage._timeStamp = inputImage.timestamp(); + mEncodedImage.capture_time_ms_ = inputImage.timestamp(); + + nsresult res; + BufferInfo::LocalRef bufferInfo; + res = BufferInfo::New(&bufferInfo); + if (NS_FAILED(res)) { + CSFLogDebug(logTag, "WebrtcMediaCodecVP8VideoEncoder::%s, BufferInfo::New return err = %d", + __FUNCTION__, (int)res); + return -1; + } + + int32_t outputIndex = mMediaCodecEncoder->DequeueOutputBuffer(bufferInfo); + + if (outputIndex == MediaCodec::INFO_TRY_AGAIN_LATER) { + // Not an error: output not available yet. Try later. 
+ CSFLogDebug(logTag, "%s dequeue output buffer try again:%d", __FUNCTION__, outputIndex); + } else if (outputIndex == MediaCodec::INFO_OUTPUT_FORMAT_CHANGED) { + // handle format change + CSFLogDebug(logTag, "%s dequeue output buffer format changed:%d", __FUNCTION__, outputIndex); + } else if (outputIndex == MediaCodec::INFO_OUTPUT_BUFFERS_CHANGED) { + CSFLogDebug(logTag, "%s dequeue output buffer changed:%d", __FUNCTION__, outputIndex); + mMediaCodecEncoder->GetOutputBuffers(); + } else if (outputIndex < 0) { + CSFLogDebug(logTag, "%s dequeue output buffer unknow error:%d", __FUNCTION__, outputIndex); + } else { +#ifdef WEBRTC_MEDIACODEC_DEBUG + CSFLogDebug(logTag, "%s dequeue output buffer return status is %d took %u ms", __FUNCTION__, outputIndex, PR_IntervalToMilliseconds(PR_IntervalNow()-time)); +#endif + + JNIEnv* env = jsjni_GetJNIForThread(); + jobject buffer = env->GetObjectArrayElement(mOutputBuffers, outputIndex); + if (buffer) { + int32_t offset; + bufferInfo->Offset(&offset); + int32_t flags; + bufferInfo->Flags(&flags); + + // The buffer will be null on Android L if we are decoding to a Surface + void* directBuffer = reinterpret_cast<uint8_t*>(env->GetDirectBufferAddress(buffer)) + offset; + + if (flags == MediaCodec::BUFFER_FLAG_SYNC_FRAME) { + mEncodedImage._frameType = webrtc::kKeyFrame; + } else { + mEncodedImage._frameType = webrtc::kDeltaFrame; + } + mEncodedImage._completeFrame = true; + + int32_t size; + bufferInfo->Size(&size); +#ifdef WEBRTC_MEDIACODEC_DEBUG + CSFLogDebug(logTag, "%s dequeue output buffer ok, index:%d, buffer size = %d, buffer offset = %d, flags = %d", __FUNCTION__, outputIndex, size, offset, flags); +#endif + + if(VerifyAndAllocate(size) == -1) { + CSFLogDebug(logTag, "%s VerifyAndAllocate buffers failed", __FUNCTION__); + return WEBRTC_VIDEO_CODEC_ERROR; + } + + mEncodedImage._length = size; + + // xxx It's too bad the mediacodec API forces us to memcpy this.... + // we should find a way that able to 'hold' the buffer or transfer it from inputImage (ping-pong + // buffers or select them from a small pool) + memcpy(mEncodedImage._buffer, directBuffer, mEncodedImage._length); + + webrtc::CodecSpecificInfo info; + info.codecType = webrtc::kVideoCodecVP8; + info.codecSpecific.VP8.pictureId = -1; + info.codecSpecific.VP8.tl0PicIdx = -1; + info.codecSpecific.VP8.keyIdx = -1; + info.codecSpecific.VP8.temporalIdx = 1; + + // Generate a header describing a single fragment. 
+        // Generate a header describing a single fragment.
+        webrtc::RTPFragmentationHeader header;
+        memset(&header, 0, sizeof(header));
+        header.VerifyAndAllocateFragmentationHeader(1);
+        header.fragmentationLength[0] = mEncodedImage._length;
+
+        mCallback->Encoded(mEncodedImage, &info, &header);
+
+        mMediaCodecEncoder->ReleaseOutputBuffer(outputIndex, false);
+        env->DeleteLocalRef(buffer);
+      }
+    }
+  }
+
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t WebrtcMediaCodecVP8VideoEncoder::RegisterEncodeCompleteCallback(webrtc::EncodedImageCallback* callback) {
+  CSFLogDebug(logTag, "%s ", __FUNCTION__);
+  mCallback = callback;
+
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t WebrtcMediaCodecVP8VideoEncoder::Release() {
+
+  CSFLogDebug(logTag, "%s ", __FUNCTION__);
+  delete mMediaCodecEncoder;
+  mMediaCodecEncoder = nullptr;
+
+  delete [] mEncodedImage._buffer;
+  mEncodedImage._buffer = nullptr;
+  mEncodedImage._size = 0;
+
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+WebrtcMediaCodecVP8VideoEncoder::~WebrtcMediaCodecVP8VideoEncoder() {
+  CSFLogDebug(logTag, "%s ", __FUNCTION__);
+  Release();
+}
+
+int32_t WebrtcMediaCodecVP8VideoEncoder::SetChannelParameters(uint32_t packetLoss, int64_t rtt) {
+  CSFLogDebug(logTag, "%s ", __FUNCTION__);
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t WebrtcMediaCodecVP8VideoEncoder::SetRates(uint32_t newBitRate, uint32_t frameRate) {
+  CSFLogDebug(logTag, "%s ", __FUNCTION__);
+  if (!mMediaCodecEncoder) {
+    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+  }
+
+  // XXX
+  // 1. Implement MediaCodec's setParameters method.
+  // 2. Find a way to instantiate a Java Bundle to pass as the parameter to
+  //    MediaCodec's setParameters method.
+  // mMediaCodecEncoder->setParameters
+
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// Decoder.
+WebrtcMediaCodecVP8VideoDecoder::WebrtcMediaCodecVP8VideoDecoder()
+  : mCallback(nullptr)
+  , mFrameWidth(0)
+  , mFrameHeight(0)
+  , mMediaCodecDecoder(nullptr) {
+  CSFLogDebug(logTag, "%s ", __FUNCTION__);
+}
+
+bool WebrtcMediaCodecVP8VideoDecoder::ResetInputBuffers() {
+  mInputBuffers = mMediaCodecDecoder->GetInputBuffers();
+
+  if (!mInputBuffers)
+    return false;
+
+  return true;
+}
+
+bool WebrtcMediaCodecVP8VideoDecoder::ResetOutputBuffers() {
+  mOutputBuffers = mMediaCodecDecoder->GetOutputBuffers();
+
+  if (!mOutputBuffers)
+    return false;
+
+  return true;
+}
+
+
+int32_t WebrtcMediaCodecVP8VideoDecoder::InitDecode(
+    const webrtc::VideoCodec* codecSettings,
+    int32_t numberOfCores) {
+
+  if (!mMediaCodecDecoder) {
+    mMediaCodecDecoder = new WebrtcAndroidMediaCodec();
+  }
+
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t WebrtcMediaCodecVP8VideoDecoder::Decode(
+    const webrtc::EncodedImage& inputImage,
+    bool missingFrames,
+    const webrtc::RTPFragmentationHeader* fragmentation,
+    const webrtc::CodecSpecificInfo* codecSpecificInfo,
+    int64_t renderTimeMs) {
+
+  CSFLogDebug(logTag, "%s, renderTimeMs = %lld ", __FUNCTION__, renderTimeMs);
+
+  if (inputImage._length == 0 || !inputImage._buffer) {
+    CSFLogDebug(logTag, "%s, input image invalid.
length = %d", __FUNCTION__, inputImage._length); + return WEBRTC_VIDEO_CODEC_ERROR; + } + + if (inputImage._frameType == webrtc::kKeyFrame) { + CSFLogDebug(logTag, "%s, inputImage is Golden frame", + __FUNCTION__); + mFrameWidth = inputImage._encodedWidth; + mFrameHeight = inputImage._encodedHeight; + } + + if (!mMediaCodecDecoder->isStarted) { + if (mFrameWidth == 0 || mFrameHeight == 0) { + return WEBRTC_VIDEO_CODEC_ERROR; + } + + mMediaCodecDecoder->SetDecoderCallback(mCallback); + nsresult res = mMediaCodecDecoder->Configure(mFrameWidth, mFrameHeight, nullptr, 0, MEDIACODEC_VIDEO_MIME_VP8, false /* decoder */); + + if (res != NS_OK) { + CSFLogDebug(logTag, "%s, decoder configure return err = %d", + __FUNCTION__, (int)res); + return WEBRTC_VIDEO_CODEC_ERROR; + } + + res = mMediaCodecDecoder->Start(); + + if (NS_FAILED(res)) { + mMediaCodecDecoder->isStarted = false; + CSFLogDebug(logTag, "%s start decoder. err = %d", __FUNCTION__, (int)res); + return WEBRTC_VIDEO_CODEC_ERROR; + } + + bool retBool = ResetInputBuffers(); + if (!retBool) { + CSFLogDebug(logTag, "%s ResetInputBuffers failed.", __FUNCTION__); + return WEBRTC_VIDEO_CODEC_ERROR; + } + retBool = ResetOutputBuffers(); + if (!retBool) { + CSFLogDebug(logTag, "%s ResetOutputBuffers failed.", __FUNCTION__); + return WEBRTC_VIDEO_CODEC_ERROR; + } + + mMediaCodecDecoder->isStarted = true; + } +#ifdef WEBRTC_MEDIACODEC_DEBUG + uint32_t time = PR_IntervalNow(); + CSFLogDebug(logTag, "%s start decoder took %u ms", __FUNCTION__, PR_IntervalToMilliseconds(PR_IntervalNow()-time)); +#endif + + bool feedFrame = true; + int32_t ret = WEBRTC_VIDEO_CODEC_ERROR; + + while (feedFrame) { + ret = mMediaCodecDecoder->FeedMediaCodecInput(inputImage, renderTimeMs); + feedFrame = (ret == -1); + } + + CSFLogDebug(logTag, "%s end, ret = %d", __FUNCTION__, ret); + + return ret; +} + +void WebrtcMediaCodecVP8VideoDecoder::DecodeFrame(EncodedFrame* frame) { + CSFLogDebug(logTag, "%s ", __FUNCTION__); +} + +int32_t WebrtcMediaCodecVP8VideoDecoder::RegisterDecodeCompleteCallback(webrtc::DecodedImageCallback* callback) { + CSFLogDebug(logTag, "%s ", __FUNCTION__); + + mCallback = callback; + return WEBRTC_VIDEO_CODEC_OK; +} + +int32_t WebrtcMediaCodecVP8VideoDecoder::Release() { + CSFLogDebug(logTag, "%s ", __FUNCTION__); + + delete mMediaCodecDecoder; + mMediaCodecDecoder = nullptr; + + return WEBRTC_VIDEO_CODEC_OK; +} + +WebrtcMediaCodecVP8VideoDecoder::~WebrtcMediaCodecVP8VideoDecoder() { + CSFLogDebug(logTag, "%s ", __FUNCTION__); + + Release(); +} + +int32_t WebrtcMediaCodecVP8VideoDecoder::Reset() { + CSFLogDebug(logTag, "%s ", __FUNCTION__); + return WEBRTC_VIDEO_CODEC_OK; +} + +} diff --git a/media/webrtc/signaling/src/media-conduit/WebrtcMediaCodecVP8VideoCodec.h b/media/webrtc/signaling/src/media-conduit/WebrtcMediaCodecVP8VideoCodec.h new file mode 100644 index 000000000..9d7e900fe --- /dev/null +++ b/media/webrtc/signaling/src/media-conduit/WebrtcMediaCodecVP8VideoCodec.h @@ -0,0 +1,114 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef WebrtcMediaCodecVP8VideoCodec_h__ +#define WebrtcMediaCodecVP8VideoCodec_h__ + +#include "mozilla/Mutex.h" +#include "nsThreadUtils.h" +#include "nsAutoPtr.h" + +#include "MediaConduitInterface.h" +#include "AudioConduit.h" +#include "VideoConduit.h" + +namespace mozilla { + +struct EncodedFrame { + uint32_t width_; + uint32_t height_; + uint32_t timeStamp_; + uint64_t decode_timestamp_; +}; + +class WebrtcAndroidMediaCodec; + +class WebrtcMediaCodecVP8VideoEncoder : public WebrtcVideoEncoder { +public: + WebrtcMediaCodecVP8VideoEncoder(); + + virtual ~WebrtcMediaCodecVP8VideoEncoder() override; + + // Implement VideoEncoder interface. + virtual uint64_t PluginID() const override { return 0; } + + virtual int32_t InitEncode(const webrtc::VideoCodec* codecSettings, + int32_t numberOfCores, + size_t maxPayloadSize) override; + + virtual int32_t Encode(const webrtc::I420VideoFrame& inputImage, + const webrtc::CodecSpecificInfo* codecSpecificInfo, + const std::vector<webrtc::VideoFrameType>* frame_types) override; + + virtual int32_t RegisterEncodeCompleteCallback(webrtc::EncodedImageCallback* callback) override; + + virtual int32_t Release() override; + + virtual int32_t SetChannelParameters(uint32_t packetLoss, int64_t rtt) override; + + virtual int32_t SetRates(uint32_t newBitRate, uint32_t frameRate) override; + +private: + int32_t VerifyAndAllocate(const uint32_t minimumSize); + bool ResetInputBuffers(); + bool ResetOutputBuffers(); + + size_t mMaxPayloadSize; + uint32_t mTimestamp; + webrtc::EncodedImage mEncodedImage; + webrtc::EncodedImageCallback* mCallback; + uint32_t mFrameWidth; + uint32_t mFrameHeight; + + WebrtcAndroidMediaCodec* mMediaCodecEncoder; + + jobjectArray mInputBuffers; + jobjectArray mOutputBuffers; +}; + +class WebrtcMediaCodecVP8VideoDecoder : public WebrtcVideoDecoder { +public: + WebrtcMediaCodecVP8VideoDecoder(); + + virtual ~WebrtcMediaCodecVP8VideoDecoder() override; + + // Implement VideoDecoder interface. + virtual uint64_t PluginID() const override { return 0; } + + virtual int32_t InitDecode(const webrtc::VideoCodec* codecSettings, + int32_t numberOfCores) override; + + virtual int32_t Decode(const webrtc::EncodedImage& inputImage, + bool missingFrames, + const webrtc::RTPFragmentationHeader* fragmentation, + const webrtc::CodecSpecificInfo* + codecSpecificInfo = NULL, + int64_t renderTimeMs = -1) override; + + virtual int32_t RegisterDecodeCompleteCallback(webrtc::DecodedImageCallback* callback) override; + + virtual int32_t Release() override; + + virtual int32_t Reset() override; + +private: + void DecodeFrame(EncodedFrame* frame); + void RunCallback(); + bool ResetInputBuffers(); + bool ResetOutputBuffers(); + + webrtc::DecodedImageCallback* mCallback; + + uint32_t mFrameWidth; + uint32_t mFrameHeight; + + WebrtcAndroidMediaCodec* mMediaCodecDecoder; + jobjectArray mInputBuffers; + jobjectArray mOutputBuffers; + +}; + +} + +#endif // WebrtcMediaCodecVP8VideoCodec_h__ diff --git a/media/webrtc/signaling/src/media-conduit/WebrtcOMXH264VideoCodec.cpp b/media/webrtc/signaling/src/media-conduit/WebrtcOMXH264VideoCodec.cpp new file mode 100644 index 000000000..dc052f4e0 --- /dev/null +++ b/media/webrtc/signaling/src/media-conduit/WebrtcOMXH264VideoCodec.cpp @@ -0,0 +1,1253 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. 
*/
+
+#include "CSFLog.h"
+
+#include "WebrtcOMXH264VideoCodec.h"
+
+// Android/Stagefright
+#include <avc_utils.h>
+#include <binder/ProcessState.h>
+#include <foundation/ABuffer.h>
+#include <foundation/AMessage.h>
+#include <gui/Surface.h>
+#include <media/ICrypto.h>
+#include <media/stagefright/MediaCodec.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MetaData.h>
+#include <OMX_Component.h>
+using namespace android;
+
+// WebRTC
+//#include "webrtc/common_video/interface/texture_video_frame.h"
+#include "webrtc/video_engine/include/vie_external_codec.h"
+#include "runnable_utils.h"
+
+// Gecko
+#if defined(MOZ_WIDGET_GONK) && ANDROID_VERSION >= 21
+#include "GonkBufferQueueProducer.h"
+#endif
+#include "GonkNativeWindow.h"
+#include "GrallocImages.h"
+#include "mozilla/Atomics.h"
+#include "mozilla/Mutex.h"
+#include "nsThreadUtils.h"
+#include "OMXCodecWrapper.h"
+#include "TextureClient.h"
+#include "mozilla/IntegerPrintfMacros.h"
+
+#define DEQUEUE_BUFFER_TIMEOUT_US (100 * 1000ll) // 100ms.
+#define START_DEQUEUE_BUFFER_TIMEOUT_US (10 * DEQUEUE_BUFFER_TIMEOUT_US) // 1s.
+#define DRAIN_THREAD_TIMEOUT_US (1000 * 1000ll) // 1s.
+
+#define WOHVC_LOG_TAG "WebrtcOMXH264VideoCodec"
+#define CODEC_LOGV(...) CSFLogInfo(WOHVC_LOG_TAG, __VA_ARGS__)
+#define CODEC_LOGD(...) CSFLogDebug(WOHVC_LOG_TAG, __VA_ARGS__)
+#define CODEC_LOGI(...) CSFLogInfo(WOHVC_LOG_TAG, __VA_ARGS__)
+#define CODEC_LOGW(...) CSFLogWarn(WOHVC_LOG_TAG, __VA_ARGS__)
+#define CODEC_LOGE(...) CSFLogError(WOHVC_LOG_TAG, __VA_ARGS__)
+
+namespace mozilla {
+
+static const uint8_t kNALStartCode[] = { 0x00, 0x00, 0x00, 0x01 };
+enum {
+  kNALTypeIDR = 5,
+  kNALTypeSPS = 7,
+  kNALTypePPS = 8,
+};
+
+// NS_INLINE_DECL_THREADSAFE_REFCOUNTING() cannot be used directly in
+// ImageNativeHandle below because the return type of webrtc::NativeHandle
+// AddRef()/Release() conflicts with those defined in the macro. To avoid another
+// copy/paste of the ref-counting implementation here, this dummy base class
+// is created to provide another level of indirection.
+class DummyRefCountBase {
+public:
+  // Use the name of the real class for logging.
+  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(DummyRefCountBase)
+protected:
+  // To make sure the subclass will be deleted/destructed properly.
+  virtual ~DummyRefCountBase() {}
+};
+
+// This class implements 2 interfaces:
+// 1. webrtc::NativeHandle: to wrap a layers::Image object so decoded frames can
+//    be passed through the WebRTC rendering pipeline using TextureVideoFrame.
+// 2. ImageHandle: for the renderer to get the image object inside without
+//    knowledge about webrtc::NativeHandle.
+class ImageNativeHandle final
+  : public webrtc::NativeHandle
+  , public DummyRefCountBase
+{
+public:
+  ImageNativeHandle(layers::Image* aImage)
+    : mImage(aImage)
+  {}
+
+  // Implement webrtc::NativeHandle.
+  virtual void* GetHandle() override { return mImage.get(); }
+
+  virtual int AddRef() override
+  {
+    return DummyRefCountBase::AddRef();
+  }
+
+  virtual int Release() override
+  {
+    return DummyRefCountBase::Release();
+  }
+
+private:
+  RefPtr<layers::Image> mImage;
+};
+
+struct EncodedFrame
+{
+  uint32_t mWidth;
+  uint32_t mHeight;
+  uint32_t mTimestamp;
+  int64_t mRenderTimeMs;
+};
+
+static void
+ShutdownThread(nsCOMPtr<nsIThread>& aThread)
+{
+  aThread->Shutdown();
+}
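The OMXOutputDrain class that follows is the output side of both codecs in this file: a worker thread sleeps on a monitor, wakes whenever frame metadata is queued, and drains the codec's output while holding no lock. A reduced sketch of the same queue-and-drain pattern using standard C++ primitives (an illustration of the pattern only, not the actual Gecko class, which uses mozilla::Monitor and nsIThread):

#include <condition_variable>
#include <cstdint>
#include <mutex>
#include <queue>
#include <thread>

// Queue-and-drain worker: QueueInput() wakes the thread, Run() drains with
// the lock released (the real DrainOutput() blocks on the codec), and
// Stop() ends the loop.
class OutputDrainSketch {
public:
  void Start() { mWorker = std::thread([this] { Run(); }); }

  void QueueInput(uint32_t timestamp) {
    std::lock_guard<std::mutex> lock(mLock);
    mInput.push(timestamp);
    mCond.notify_all();            // like lock.NotifyAll() in the real class
  }

  void Stop() {
    { std::lock_guard<std::mutex> lock(mLock); mEnding = true; }
    mCond.notify_all();
    if (mWorker.joinable()) mWorker.join();
  }

private:
  void Run() {
    std::unique_lock<std::mutex> lock(mLock);
    while (true) {
      mCond.wait(lock, [this] { return mEnding || !mInput.empty(); });
      if (mEnding) return;
      uint32_t ts = mInput.front();
      mInput.pop();
      lock.unlock();               // don't hold the lock while blocked on the codec
      DrainOutput(ts);
      lock.lock();
    }
  }

  void DrainOutput(uint32_t) {}    // stands in for the blocking codec dequeue

  std::mutex mLock;
  std::condition_variable mCond;
  std::queue<uint32_t> mInput;
  std::thread mWorker;
  bool mEnding = false;
};

One important difference: in the real class, DrainOutput() itself decides when to pop mInputFrames, because a single input may yield zero or several outputs (see the bug 997110 TODO in the comment below).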
+// Base runnable class to repeatedly pull OMX output buffers on a separate thread.
+// How to use:
+// - implement DrainOutput() to get output. Remember to return false to tell
+//   the drain not to pop the input queue.
+// - call QueueInput() to schedule a run to drain output. The input, aFrame,
+//   should contain corresponding info such as image size and timestamps for
+//   the DrainOutput() implementation to construct the data needed by the
+//   encoded/decoded callbacks.
+// TODO: Bug 997110 - Revisit queue/drain logic. The current design assumes that
+//       the encoder only generates one output buffer per input frame and won't
+//       work if the encoder drops frames or generates multiple outputs per input.
+class OMXOutputDrain : public Runnable
+{
+public:
+  void Start() {
+    CODEC_LOGD("OMXOutputDrain starting");
+    MonitorAutoLock lock(mMonitor);
+    if (mThread == nullptr) {
+      NS_NewNamedThread("OMXOutputDrain", getter_AddRefs(mThread));
+    }
+    CODEC_LOGD("OMXOutputDrain started");
+    mEnding = false;
+    mThread->Dispatch(this, NS_DISPATCH_NORMAL);
+  }
+
+  void Stop() {
+    CODEC_LOGD("OMXOutputDrain stopping");
+    MonitorAutoLock lock(mMonitor);
+    mEnding = true;
+    lock.NotifyAll(); // In case Run() is waiting.
+
+    if (mThread != nullptr) {
+      MonitorAutoUnlock unlock(mMonitor);
+      CODEC_LOGD("OMXOutputDrain thread shutdown");
+      NS_DispatchToMainThread(
+        WrapRunnableNM<decltype(&ShutdownThread),
+                       nsCOMPtr<nsIThread> >(&ShutdownThread, mThread));
+      mThread = nullptr;
+    }
+    CODEC_LOGD("OMXOutputDrain stopped");
+  }
+
+  void QueueInput(const EncodedFrame& aFrame)
+  {
+    MonitorAutoLock lock(mMonitor);
+
+    MOZ_ASSERT(mThread);
+
+    mInputFrames.push(aFrame);
+    // Notify Run() about queued input so it can start working.
+    lock.NotifyAll();
+  }
+
+  NS_IMETHOD Run() override
+  {
+    MonitorAutoLock lock(mMonitor);
+    if (mEnding) {
+      return NS_OK;
+    }
+    MOZ_ASSERT(mThread);
+
+    while (true) {
+      if (mInputFrames.empty()) {
+        // Wait for new input.
+        lock.Wait();
+      }
+
+      if (mEnding) {
+        CODEC_LOGD("OMXOutputDrain Run() ending");
+        // Stop draining.
+        break;
+      }
+
+      MOZ_ASSERT(!mInputFrames.empty());
+      {
+        // Release the monitor while draining because it's blocking.
+        MonitorAutoUnlock unlock(mMonitor);
+        DrainOutput();
+      }
+    }
+
+    CODEC_LOGD("OMXOutputDrain Ended");
+    return NS_OK;
+  }
+
+protected:
+  OMXOutputDrain()
+    : mMonitor("OMXOutputDrain monitor")
+    , mEnding(false)
+  {}
+
+  // Drain output buffers for the input frame queue mInputFrames.
+  // mInputFrames contains info such as size and time of the input frames.
+  // We have to use a queue to handle encoder frame skips - we can input 10
+  // frames and get one back. NOTE: any access of mInputFrames MUST be
+  // preceded by locking mMonitor!
+
+  // Blocks waiting for decoded buffers, but for a limited period because
+  // we need to check for shutdown.
+  virtual bool DrainOutput() = 0;
+
+protected:
+  // This monitor protects all things below it, and is also used to
+  // wait/notify queued input.
+ Monitor mMonitor; + std::queue<EncodedFrame> mInputFrames; + +private: + // also protected by mMonitor + nsCOMPtr<nsIThread> mThread; + bool mEnding; +}; + +// Assumption: SPS is first paramset or is not present +static bool IsParamSets(uint8_t* aData, size_t aSize) +{ + MOZ_ASSERT(aData && aSize > sizeof(kNALStartCode)); + return (aData[sizeof(kNALStartCode)] & 0x1f) == kNALTypeSPS; +} + +// get the length of any pre-pended SPS/PPS's +static size_t ParamSetLength(uint8_t* aData, size_t aSize) +{ + const uint8_t* data = aData; + size_t size = aSize; + const uint8_t* nalStart = nullptr; + size_t nalSize = 0; + while (getNextNALUnit(&data, &size, &nalStart, &nalSize, true) == OK) { + if ((*nalStart & 0x1f) != kNALTypeSPS && + (*nalStart & 0x1f) != kNALTypePPS) { + MOZ_ASSERT(nalStart - sizeof(kNALStartCode) >= aData); + return (nalStart - sizeof(kNALStartCode)) - aData; // SPS/PPS/iframe + } + } + return aSize; // it's only SPS/PPS +} + +// H.264 decoder using stagefright. +// It implements gonk native window callback to receive buffers from +// MediaCodec::RenderOutputBufferAndRelease(). +class WebrtcOMXDecoder final : public GonkNativeWindowNewFrameCallback +{ + NS_INLINE_DECL_THREADSAFE_REFCOUNTING(WebrtcOMXDecoder) + +private: + virtual ~WebrtcOMXDecoder() + { + CODEC_LOGD("WebrtcOMXH264VideoDecoder:%p OMX destructor", this); + if (mStarted) { + Stop(); + } + if (mCodec != nullptr) { + mCodec->release(); + mCodec.clear(); + } + mLooper.clear(); + } + +public: + WebrtcOMXDecoder(const char* aMimeType, + webrtc::DecodedImageCallback* aCallback) + : mWidth(0) + , mHeight(0) + , mStarted(false) + , mCallback(aCallback) + , mDecodedFrameLock("WebRTC decoded frame lock") + , mEnding(false) + { + // Create binder thread pool required by stagefright. + android::ProcessState::self()->startThreadPool(); + + mLooper = new ALooper; + mLooper->start(); + CODEC_LOGD("WebrtcOMXH264VideoDecoder:%p creating decoder", this); + mCodec = MediaCodec::CreateByType(mLooper, aMimeType, false /* encoder */); + CODEC_LOGD("WebrtcOMXH264VideoDecoder:%p OMX created", this); + } + + // Find SPS in input data and extract picture width and height if found. + static status_t ExtractPicDimensions(uint8_t* aData, size_t aSize, + int32_t* aWidth, int32_t* aHeight) + { + MOZ_ASSERT(aData && aSize > sizeof(kNALStartCode)); + if ((aData[sizeof(kNALStartCode)] & 0x1f) != kNALTypeSPS) { + return ERROR_MALFORMED; + } + sp<ABuffer> sps = new ABuffer(&aData[sizeof(kNALStartCode)], aSize - sizeof(kNALStartCode)); + FindAVCDimensions(sps, aWidth, aHeight); + return OK; + } + + // Configure decoder using image width/height. + status_t ConfigureWithPicDimensions(int32_t aWidth, int32_t aHeight) + { + MOZ_ASSERT(mCodec != nullptr); + if (mCodec == nullptr) { + return INVALID_OPERATION; + } + + CODEC_LOGD("OMX:%p decoder width:%d height:%d", this, aWidth, aHeight); + + sp<AMessage> config = new AMessage(); + config->setString("mime", MEDIA_MIMETYPE_VIDEO_AVC); + config->setInt32("width", aWidth); + config->setInt32("height", aHeight); + mWidth = aWidth; + mHeight = aHeight; + + sp<Surface> surface = nullptr; +#if defined(MOZ_WIDGET_GONK) && ANDROID_VERSION >= 21 + sp<IGraphicBufferProducer> producer; + sp<IGonkGraphicBufferConsumer> consumer; + GonkBufferQueue::createBufferQueue(&producer, &consumer); + mNativeWindow = new GonkNativeWindow(consumer); +#else + mNativeWindow = new GonkNativeWindow(); +#endif + if (mNativeWindow.get()) { + // listen to buffers queued by MediaCodec::RenderOutputBufferAndRelease(). 
+ mNativeWindow->setNewFrameCallback(this); + // XXX remove buffer changes after a better solution lands - bug 1009420 +#if defined(MOZ_WIDGET_GONK) && ANDROID_VERSION >= 21 + static_cast<GonkBufferQueueProducer*>(producer.get())->setSynchronousMode(false); + // More spare buffers to avoid OMX decoder waiting for native window + consumer->setMaxAcquiredBufferCount(WEBRTC_OMX_H264_MIN_DECODE_BUFFERS); + surface = new Surface(producer); +#else + sp<GonkBufferQueue> bq = mNativeWindow->getBufferQueue(); + bq->setSynchronousMode(false); + // More spare buffers to avoid OMX decoder waiting for native window + bq->setMaxAcquiredBufferCount(WEBRTC_OMX_H264_MIN_DECODE_BUFFERS); + surface = new Surface(bq); +#endif + } + status_t result = mCodec->configure(config, surface, nullptr, 0); + if (result == OK) { + CODEC_LOGD("OMX:%p decoder configured", this); + result = Start(); + } + return result; + } + + status_t + FillInput(const webrtc::EncodedImage& aEncoded, bool aIsFirstFrame, + int64_t& aRenderTimeMs) + { + MOZ_ASSERT(mCodec != nullptr && aEncoded._buffer && aEncoded._length > 0); + if (mCodec == nullptr || !aEncoded._buffer || aEncoded._length == 0) { + return INVALID_OPERATION; + } + + // Break input encoded data into NALUs and send each one to decode. + // 8x10 decoder doesn't allow picture coding NALs to be in the same buffer + // with SPS/PPS (BUFFER_FLAG_CODECCONFIG) per QC + const uint8_t* data = aEncoded._buffer; + size_t size = aEncoded._length; + const uint8_t* nalStart = nullptr; + size_t nalSize = 0; + status_t err = OK; + + // this returns a pointer to the NAL byte (after the StartCode) + while (getNextNALUnit(&data, &size, &nalStart, &nalSize, true) == OK) { + // Individual NALU inherits metadata from input encoded data. + webrtc::EncodedImage nalu(aEncoded); + + nalu._buffer = const_cast<uint8_t*>(nalStart) - sizeof(kNALStartCode); + MOZ_ASSERT(nalu._buffer >= aEncoded._buffer); + nalu._length = nalSize + sizeof(kNALStartCode); + MOZ_ASSERT(nalu._buffer + nalu._length <= aEncoded._buffer + aEncoded._length); + + size_t index; + err = mCodec->dequeueInputBuffer(&index, + aIsFirstFrame ? START_DEQUEUE_BUFFER_TIMEOUT_US : DEQUEUE_BUFFER_TIMEOUT_US); + if (err != OK) { + if (err != -EAGAIN) { + CODEC_LOGE("decode dequeue input buffer error:%d", err); + } else { + CODEC_LOGE("decode dequeue 100ms without a buffer (EAGAIN)"); + } + return err; + } + + // Prepend start code to buffer. + MOZ_ASSERT(memcmp(nalu._buffer, kNALStartCode, sizeof(kNALStartCode)) == 0); + const sp<ABuffer>& omxIn = mInputBuffers.itemAt(index); + MOZ_ASSERT(omxIn->capacity() >= nalu._length); + omxIn->setRange(0, nalu._length); + // Copying is needed because MediaCodec API doesn't support externally + // allocated buffer as input. + uint8_t* dst = omxIn->data(); + memcpy(dst, nalu._buffer, nalu._length); + int64_t inputTimeUs = (nalu._timeStamp * 1000ll) / 90; // 90kHz -> us. + // Assign input flags according to input buffer NALU and frame types. 
+ uint32_t flags; + int nalType = dst[sizeof(kNALStartCode)] & 0x1f; + switch (nalType) { + case kNALTypeSPS: + case kNALTypePPS: + flags = MediaCodec::BUFFER_FLAG_CODECCONFIG; + break; + case kNALTypeIDR: + flags = MediaCodec::BUFFER_FLAG_SYNCFRAME; + break; + default: + flags = 0; + break; + } + CODEC_LOGD("Decoder input: %d bytes (NAL 0x%02x), time %lld (%u), flags 0x%x", + nalu._length, dst[sizeof(kNALStartCode)], inputTimeUs, nalu._timeStamp, flags); + err = mCodec->queueInputBuffer(index, 0, nalu._length, inputTimeUs, flags); + if (err == OK && !(flags & MediaCodec::BUFFER_FLAG_CODECCONFIG)) { + if (mOutputDrain == nullptr) { + mOutputDrain = new OutputDrain(this); + mOutputDrain->Start(); + } + EncodedFrame frame; + frame.mWidth = mWidth; + frame.mHeight = mHeight; + frame.mTimestamp = nalu._timeStamp; + frame.mRenderTimeMs = aRenderTimeMs; + mOutputDrain->QueueInput(frame); + } + } + + return err; + } + + status_t + DrainOutput(std::queue<EncodedFrame>& aInputFrames, Monitor& aMonitor) + { + MOZ_ASSERT(mCodec != nullptr); + if (mCodec == nullptr) { + return INVALID_OPERATION; + } + + size_t index = 0; + size_t outOffset = 0; + size_t outSize = 0; + int64_t outTime = -1ll; + uint32_t outFlags = 0; + status_t err = mCodec->dequeueOutputBuffer(&index, &outOffset, &outSize, + &outTime, &outFlags, + DRAIN_THREAD_TIMEOUT_US); + switch (err) { + case OK: + break; + case -EAGAIN: + // Not an error: output not available yet. Try later. + CODEC_LOGI("decode dequeue OMX output buffer timed out. Try later."); + return err; + case INFO_FORMAT_CHANGED: + // Not an error: will get this value when OMX output buffer is enabled, + // or when input size changed. + CODEC_LOGD("decode dequeue OMX output buffer format change"); + return err; + case INFO_OUTPUT_BUFFERS_CHANGED: + // Not an error: will get this value when OMX output buffer changed + // (probably because of input size change). + CODEC_LOGD("decode dequeue OMX output buffer change"); + err = mCodec->getOutputBuffers(&mOutputBuffers); + MOZ_ASSERT(err == OK); + return INFO_OUTPUT_BUFFERS_CHANGED; + default: + CODEC_LOGE("decode dequeue OMX output buffer error:%d", err); + // Return OK to instruct OutputDrain to drop input from queue. + MonitorAutoLock lock(aMonitor); + aInputFrames.pop(); + return OK; + } + + CODEC_LOGD("Decoder output: %d bytes, offset %u, time %lld, flags 0x%x", + outSize, outOffset, outTime, outFlags); + if (mCallback) { + EncodedFrame frame; + { + MonitorAutoLock lock(aMonitor); + frame = aInputFrames.front(); + aInputFrames.pop(); + } + { + // Store info of this frame. OnNewFrame() will need the timestamp later. + MutexAutoLock lock(mDecodedFrameLock); + if (mEnding) { + mCodec->releaseOutputBuffer(index); + return err; + } + mDecodedFrames.push(frame); + } + // Ask codec to queue buffer back to native window. OnNewFrame() will be + // called. + mCodec->renderOutputBufferAndRelease(index); + // Once consumed, buffer will be queued back to GonkNativeWindow for codec + // to dequeue/use. + } else { + mCodec->releaseOutputBuffer(index); + } + + return err; + } + + // Will be called when MediaCodec::RenderOutputBufferAndRelease() returns + // buffers back to native window for rendering. 
+ void OnNewFrame() override + { + RefPtr<layers::TextureClient> buffer = mNativeWindow->getCurrentBuffer(); + if (!buffer) { + CODEC_LOGE("Decoder NewFrame: Get null buffer"); + return; + } + + gfx::IntSize picSize(buffer->GetSize()); + nsAutoPtr<layers::GrallocImage> grallocImage(new layers::GrallocImage()); + grallocImage->AdoptData(buffer, picSize); + + // Get timestamp of the frame about to render. + int64_t timestamp = -1; + int64_t renderTimeMs = -1; + { + MutexAutoLock lock(mDecodedFrameLock); + if (mDecodedFrames.empty()) { + return; + } + EncodedFrame decoded = mDecodedFrames.front(); + timestamp = decoded.mTimestamp; + renderTimeMs = decoded.mRenderTimeMs; + mDecodedFrames.pop(); + } + MOZ_ASSERT(timestamp >= 0 && renderTimeMs >= 0); + + CODEC_LOGD("Decoder NewFrame: %dx%d, timestamp %lld, renderTimeMs %lld", + picSize.width, picSize.height, timestamp, renderTimeMs); + + nsAutoPtr<webrtc::I420VideoFrame> videoFrame(new webrtc::I420VideoFrame( + new ImageNativeHandle(grallocImage.forget()), + picSize.width, + picSize.height, + timestamp, + renderTimeMs)); + if (videoFrame != nullptr) { + mCallback->Decoded(*videoFrame); + } + } + +private: + class OutputDrain : public OMXOutputDrain + { + public: + OutputDrain(WebrtcOMXDecoder* aOMX) + : OMXOutputDrain() + , mOMX(aOMX) + {} + + protected: + virtual bool DrainOutput() override + { + return (mOMX->DrainOutput(mInputFrames, mMonitor) == OK); + } + + private: + WebrtcOMXDecoder* mOMX; + }; + + status_t Start() + { + MOZ_ASSERT(!mStarted); + if (mStarted) { + return OK; + } + + { + MutexAutoLock lock(mDecodedFrameLock); + mEnding = false; + } + status_t err = mCodec->start(); + if (err == OK) { + mStarted = true; + mCodec->getInputBuffers(&mInputBuffers); + mCodec->getOutputBuffers(&mOutputBuffers); + } + + return err; + } + + status_t Stop() + { + MOZ_ASSERT(mStarted); + if (!mStarted) { + return OK; + } + + CODEC_LOGD("OMXOutputDrain decoder stopping"); + // Drop all 'pending to render' frames. + { + MutexAutoLock lock(mDecodedFrameLock); + mEnding = true; + while (!mDecodedFrames.empty()) { + mDecodedFrames.pop(); + } + } + + if (mOutputDrain != nullptr) { + CODEC_LOGD("decoder's OutputDrain stopping"); + mOutputDrain->Stop(); + mOutputDrain = nullptr; + } + + status_t err = mCodec->stop(); + if (err == OK) { + mInputBuffers.clear(); + mOutputBuffers.clear(); + mStarted = false; + } else { + MOZ_ASSERT(false); + } + CODEC_LOGD("OMXOutputDrain decoder stopped"); + return err; + } + + sp<ALooper> mLooper; + sp<MediaCodec> mCodec; // OMXCodec + int mWidth; + int mHeight; + android::Vector<sp<ABuffer> > mInputBuffers; + android::Vector<sp<ABuffer> > mOutputBuffers; + bool mStarted; + + sp<GonkNativeWindow> mNativeWindow; + + RefPtr<OutputDrain> mOutputDrain; + webrtc::DecodedImageCallback* mCallback; + + Mutex mDecodedFrameLock; // To protect mDecodedFrames and mEnding + std::queue<EncodedFrame> mDecodedFrames; + bool mEnding; +}; + +class EncOutputDrain : public OMXOutputDrain +{ +public: + EncOutputDrain(OMXVideoEncoder* aOMX, webrtc::EncodedImageCallback* aCallback) + : OMXOutputDrain() + , mOMX(aOMX) + , mCallback(aCallback) + , mIsPrevFrameParamSets(false) + {} + +protected: + virtual bool DrainOutput() override + { + nsTArray<uint8_t> output; + int64_t timeUs = -1ll; + int flags = 0; + nsresult rv = mOMX->GetNextEncodedFrame(&output, &timeUs, &flags, + DRAIN_THREAD_TIMEOUT_US); + if (NS_WARN_IF(NS_FAILED(rv))) { + // Fail to get encoded frame. The corresponding input frame should be + // removed. 
+      // We'll treat this like a skipped frame.
+      return true;
+    }
+
+    if (output.Length() == 0) {
+      // No encoded data yet. Try later.
+      CODEC_LOGD("OMX: (encode no output available this time)");
+      return false;
+    }
+
+    // Conversion to us rounds down, so we need to round up for us->90KHz.
+    uint32_t target_timestamp = (timeUs * 90ll + 999) / 1000; // us -> 90KHz
+    // 8x10 v2.0 encoder doesn't set this reliably:
+    //bool isParamSets = (flags & MediaCodec::BUFFER_FLAG_CODECCONFIG);
+    // Assume that SPS/PPS will be at the start of any buffer.
+    // Assume PPS will not be in a separate buffer - SPS/PPS or SPS/PPS/iframe.
+    bool isParamSets = IsParamSets(output.Elements(), output.Length());
+    bool isIFrame = (flags & MediaCodec::BUFFER_FLAG_SYNCFRAME);
+    CODEC_LOGD("OMX: encoded frame (%d): time %lld (%u), flags x%x",
+               output.Length(), timeUs, target_timestamp, flags);
+    // Should not be parameter sets and an I-frame at the same time.
+    // Except that it is possible, apparently, after an encoder re-config (bug 1063883):
+    // MOZ_ASSERT(!(isParamSets && isIFrame));
+
+    if (mCallback) {
+      // The implementation here assumes encoder output to be a buffer containing
+      // parameter sets (SPS + PPS) followed by a series of buffers, each for
+      // one input frame.
+      // TODO: handle output violating this assumption in bug 997110.
+      webrtc::EncodedImage encoded(output.Elements(), output.Length(),
+                                   output.Capacity());
+      encoded._frameType = (isParamSets || isIFrame) ?
+                           webrtc::kKeyFrame : webrtc::kDeltaFrame;
+      EncodedFrame input_frame;
+      {
+        MonitorAutoLock lock(mMonitor);
+        // Will SPS/PPS have the same timestamp as their I-frame? The initial one
+        // on 8x10 has a 0 timestamp.
+        if (isParamSets) {
+          // Let's assume it was the first item in the queue, but leave it there
+          // since an IDR will follow.
+          input_frame = mInputFrames.front();
+        } else {
+          do {
+            if (mInputFrames.empty()) {
+              // Let's assume it was the last item in the queue, but leave it there.
+              mInputFrames.push(input_frame);
+              CODEC_LOGE("OMX: encoded timestamp %u which doesn't match input queue!! (head %u)",
+                         target_timestamp, input_frame.mTimestamp);
+              break;
+            }
+
+            input_frame = mInputFrames.front();
+            mInputFrames.pop();
+            if (input_frame.mTimestamp != target_timestamp) {
+              CODEC_LOGD("OMX: encoder skipped frame timestamp %u", input_frame.mTimestamp);
+            }
+          } while (input_frame.mTimestamp != target_timestamp);
+        }
+      }
+
+      encoded._encodedWidth = input_frame.mWidth;
+      encoded._encodedHeight = input_frame.mHeight;
+      encoded._timeStamp = input_frame.mTimestamp;
+      encoded.capture_time_ms_ = input_frame.mRenderTimeMs;
+      encoded._completeFrame = true;
+
+      CODEC_LOGD("Encoded frame: %d bytes, %dx%d, is_param %d, is_iframe %d, timestamp %u, captureTimeMs %" PRIu64,
+                 encoded._length, encoded._encodedWidth, encoded._encodedHeight,
+                 isParamSets, isIFrame, encoded._timeStamp, encoded.capture_time_ms_);
+      // Prepend SPS/PPS to I-frames unless they were sent last time.
+      SendEncodedDataToCallback(encoded, isIFrame && !mIsPrevFrameParamSets && !isParamSets);
+      // This will be true only for the frame following a paramset block! So if
+      // we're working with a correct encoder that always generates SPS/PPS and
+      // then an iframe, we won't try to insert. (Also, don't set it if we get
+      // SPS/PPS/iframe in one buffer.)
+      mIsPrevFrameParamSets = isParamSets && !isIFrame;
+      if (isParamSets) {
+        // Copy off the param sets for inserting later.
+        mParamSets.Clear();
+        // Since we may have SPS/PPS or SPS/PPS/iframe:
+        size_t length = ParamSetLength(encoded._buffer, encoded._length);
+        MOZ_ASSERT(length > 0);
+        mParamSets.AppendElements(encoded._buffer, length);
+      }
+    }
+
+    return !isParamSets; // not really needed anymore
+  }
+
+private:
+  // Send encoded data to the callback. The data will be broken into individual
+  // NALUs if necessary and sent to the callback one by one. This function can
+  // also insert SPS/PPS NALUs in front of the input data if requested.
+  void SendEncodedDataToCallback(webrtc::EncodedImage& aEncodedImage,
+                                 bool aPrependParamSets)
+  {
+    if (aPrependParamSets) {
+      webrtc::EncodedImage prepend(aEncodedImage);
+      // Insert current parameter sets in front of the input encoded data.
+      MOZ_ASSERT(mParamSets.Length() > sizeof(kNALStartCode)); // Start code + ...
+      prepend._length = mParamSets.Length();
+      prepend._buffer = mParamSets.Elements();
+      // Break into NALUs and send.
+      CODEC_LOGD("Prepending SPS/PPS: %d bytes, timestamp %u, captureTimeMs %" PRIu64,
+                 prepend._length, prepend._timeStamp, prepend.capture_time_ms_);
+      SendEncodedDataToCallback(prepend, false);
+    }
+
+    struct nal_entry {
+      uint32_t offset;
+      uint32_t size;
+    };
+    AutoTArray<nal_entry, 1> nals;
+
+    // Break input encoded data into NALUs and send each one to the callback.
+    const uint8_t* data = aEncodedImage._buffer;
+    size_t size = aEncodedImage._length;
+    const uint8_t* nalStart = nullptr;
+    size_t nalSize = 0;
+    while (getNextNALUnit(&data, &size, &nalStart, &nalSize, true) == OK) {
+      // XXX optimize by making buffer an offset
+      nal_entry nal = {((uint32_t) (nalStart - aEncodedImage._buffer)), (uint32_t) nalSize};
+      nals.AppendElement(nal);
+    }
+
+    size_t num_nals = nals.Length();
+    if (num_nals > 0) {
+      webrtc::RTPFragmentationHeader fragmentation;
+      fragmentation.VerifyAndAllocateFragmentationHeader(num_nals);
+      for (size_t i = 0; i < num_nals; i++) {
+        fragmentation.fragmentationOffset[i] = nals[i].offset;
+        fragmentation.fragmentationLength[i] = nals[i].size;
+      }
+      webrtc::EncodedImage unit(aEncodedImage);
+      unit._completeFrame = true;
+
+      mCallback->Encoded(unit, nullptr, &fragmentation);
+    }
+  }
+
+  OMXVideoEncoder* mOMX;
+  webrtc::EncodedImageCallback* mCallback;
+  bool mIsPrevFrameParamSets;
+  nsTArray<uint8_t> mParamSets;
+};
+
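SendEncodedDataToCallback() above relies on H.264 Annex B framing: every NAL unit follows a 00 00 00 01 start code, and the low five bits of the byte after it encode the NAL type (5 = IDR, 7 = SPS, 8 = PPS, the values in the enum near the top of this file). A self-contained toy version of that scan, for illustration only; the real code uses stagefright's getNextNALUnit():

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  // One access unit: SPS, PPS, then an IDR slice (payloads truncated).
  const uint8_t au[] = {0, 0, 0, 1, 0x67, 0xAA,
                        0, 0, 0, 1, 0x68, 0xBB,
                        0, 0, 0, 1, 0x65, 0xCC};
  const uint8_t startCode[] = {0, 0, 0, 1};
  for (size_t i = 0; i + sizeof(startCode) < sizeof(au); ++i) {
    if (memcmp(au + i, startCode, sizeof(startCode)) == 0) {
      // The low 5 bits of the first byte after the start code are the NAL type.
      printf("NAL at offset %zu, type %d\n", i, au[i + sizeof(startCode)] & 0x1f);
    }
  }
  return 0;  // prints types 7 (SPS), 8 (PPS), 5 (IDR)
}

+// Encoder.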
+WebrtcOMXH264VideoEncoder::WebrtcOMXH264VideoEncoder() + : mOMX(nullptr) + , mCallback(nullptr) + , mWidth(0) + , mHeight(0) + , mFrameRate(0) + , mBitRateKbps(0) +#ifdef OMX_IDR_NEEDED_FOR_BITRATE + , mBitRateAtLastIDR(0) +#endif + , mOMXConfigured(false) + , mOMXReconfigure(false) +{ + mReservation = new OMXCodecReservation(true); + CODEC_LOGD("WebrtcOMXH264VideoEncoder:%p constructed", this); +} + +int32_t +WebrtcOMXH264VideoEncoder::InitEncode(const webrtc::VideoCodec* aCodecSettings, + int32_t aNumOfCores, + size_t aMaxPayloadSize) +{ + CODEC_LOGD("WebrtcOMXH264VideoEncoder:%p init", this); + + if (mOMX == nullptr) { + nsAutoPtr<OMXVideoEncoder> omx(OMXCodecWrapper::CreateAVCEncoder()); + if (NS_WARN_IF(omx == nullptr)) { + return WEBRTC_VIDEO_CODEC_ERROR; + } + mOMX = omx.forget(); + CODEC_LOGD("WebrtcOMXH264VideoEncoder:%p OMX created", this); + } + + if (!mReservation->ReserveOMXCodec()) { + CODEC_LOGD("WebrtcOMXH264VideoEncoder:%p Encoder in use", this); + mOMX = nullptr; + return WEBRTC_VIDEO_CODEC_ERROR; + } + + // Defer configuration until 1st frame is received because this function will + // be called more than once, and unfortunately with incorrect setting values + // at first. + mWidth = aCodecSettings->width; + mHeight = aCodecSettings->height; + mFrameRate = aCodecSettings->maxFramerate; + mBitRateKbps = aCodecSettings->startBitrate; + // XXX handle maxpayloadsize (aka mode 0/1) + + CODEC_LOGD("WebrtcOMXH264VideoEncoder:%p OMX Encoder reserved", this); + return WEBRTC_VIDEO_CODEC_OK; +} + +int32_t +WebrtcOMXH264VideoEncoder::Encode(const webrtc::I420VideoFrame& aInputImage, + const webrtc::CodecSpecificInfo* aCodecSpecificInfo, + const std::vector<webrtc::VideoFrameType>* aFrameTypes) +{ + MOZ_ASSERT(mOMX != nullptr); + if (mOMX == nullptr) { + return WEBRTC_VIDEO_CODEC_ERROR; + } + + // Have to reconfigure for resolution or framerate changes :-( + // ~220ms initial configure on 8x10, 50-100ms for re-configure it appears + // XXX drop frames while this is happening? + if (aInputImage.width() < 0 || (uint32_t)aInputImage.width() != mWidth || + aInputImage.height() < 0 || (uint32_t)aInputImage.height() != mHeight) { + mWidth = aInputImage.width(); + mHeight = aInputImage.height(); + mOMXReconfigure = true; + } + + if (!mOMXConfigured || mOMXReconfigure) { + if (mOMXConfigured) { + CODEC_LOGD("WebrtcOMXH264VideoEncoder:%p reconfiguring encoder %dx%d @ %u fps", + this, mWidth, mHeight, mFrameRate); + mOMXConfigured = false; + } + mOMXReconfigure = false; + // XXX This can take time. Encode() likely assumes encodes are queued "quickly" and + // don't block the input too long. Frames may build up. + + // XXX take from negotiated SDP in codecSpecific data + OMX_VIDEO_AVCLEVELTYPE level = OMX_VIDEO_AVCLevel3; + // OMX_Video_ControlRateConstant is not supported on QC 8x10 + OMX_VIDEO_CONTROLRATETYPE bitrateMode = OMX_Video_ControlRateConstantSkipFrames; + + // Set up configuration parameters for AVC/H.264 encoder. + sp<AMessage> format = new AMessage; + // Fixed values + format->setString("mime", MEDIA_MIMETYPE_VIDEO_AVC); + // XXX We should only set to < infinity if we're not using any recovery RTCP options + // However, we MUST set it to a lower value because the 8x10 rate controller + // only changes rate at GOP boundaries.... but it also changes rate on requested GOPs + + // Too long and we have very low bitrates for the first second or two... plus + // bug 1014921 means we have to force them every ~3 seconds or less. 
+ format->setInt32("i-frame-interval", 4 /* seconds */); + // See mozilla::layers::GrallocImage, supports YUV 4:2:0, CbCr width and + // height is half that of Y + format->setInt32("color-format", OMX_COLOR_FormatYUV420SemiPlanar); + format->setInt32("profile", OMX_VIDEO_AVCProfileBaseline); + format->setInt32("level", level); + format->setInt32("bitrate-mode", bitrateMode); + format->setInt32("store-metadata-in-buffers", 0); + // XXX Unfortunately, 8x10 doesn't support this, but ask anyways + format->setInt32("prepend-sps-pps-to-idr-frames", 1); + // Input values. + format->setInt32("width", mWidth); + format->setInt32("height", mHeight); + format->setInt32("stride", mWidth); + format->setInt32("slice-height", mHeight); + format->setInt32("frame-rate", mFrameRate); + format->setInt32("bitrate", mBitRateKbps*1000); + + CODEC_LOGD("WebrtcOMXH264VideoEncoder:%p configuring encoder %dx%d @ %d fps, rate %d kbps", + this, mWidth, mHeight, mFrameRate, mBitRateKbps); + nsresult rv = mOMX->ConfigureDirect(format, + OMXVideoEncoder::BlobFormat::AVC_NAL); + if (NS_WARN_IF(NS_FAILED(rv))) { + CODEC_LOGE("WebrtcOMXH264VideoEncoder:%p FAILED configuring encoder %d", this, int(rv)); + return WEBRTC_VIDEO_CODEC_ERROR; + } + mOMXConfigured = true; +#ifdef OMX_IDR_NEEDED_FOR_BITRATE + mLastIDRTime = TimeStamp::Now(); + mBitRateAtLastIDR = mBitRateKbps; +#endif + } + + if (aFrameTypes && aFrameTypes->size() && + ((*aFrameTypes)[0] == webrtc::kKeyFrame)) { + mOMX->RequestIDRFrame(); +#ifdef OMX_IDR_NEEDED_FOR_BITRATE + mLastIDRTime = TimeStamp::Now(); + mBitRateAtLastIDR = mBitRateKbps; + } else if (mBitRateKbps != mBitRateAtLastIDR) { + // 8x10 OMX codec requires a keyframe to shift bitrates! + TimeStamp now = TimeStamp::Now(); + if (mLastIDRTime.IsNull()) { + // paranoia + mLastIDRTime = now; + } + int32_t timeSinceLastIDR = (now - mLastIDRTime).ToMilliseconds(); + + // Balance asking for IDRs too often against direction and amount of bitrate change. + + // HACK for bug 1014921: 8x10 has encode/decode mismatches that build up errors + // if you go too long without an IDR. In normal use, bitrate will change often + // enough to never hit this time limit. + if ((timeSinceLastIDR > 3000) || + (mBitRateKbps < (mBitRateAtLastIDR * 8)/10) || + (timeSinceLastIDR < 300 && mBitRateKbps < (mBitRateAtLastIDR * 9)/10) || + (timeSinceLastIDR < 1000 && mBitRateKbps < (mBitRateAtLastIDR * 97)/100) || + (timeSinceLastIDR >= 1000 && mBitRateKbps < mBitRateAtLastIDR) || + (mBitRateKbps > (mBitRateAtLastIDR * 15)/10) || + (timeSinceLastIDR < 500 && mBitRateKbps > (mBitRateAtLastIDR * 13)/10) || + (timeSinceLastIDR < 1000 && mBitRateKbps > (mBitRateAtLastIDR * 11)/10) || + (timeSinceLastIDR >= 1000 && mBitRateKbps > mBitRateAtLastIDR)) { + CODEC_LOGD("Requesting IDR for bitrate change from %u to %u (time since last idr %dms)", + mBitRateAtLastIDR, mBitRateKbps, timeSinceLastIDR); + + mOMX->RequestIDRFrame(); + mLastIDRTime = now; + mBitRateAtLastIDR = mBitRateKbps; + } +#endif + } + + // Wrap I420VideoFrame input with PlanarYCbCrImage for OMXVideoEncoder. 
+ layers::PlanarYCbCrData yuvData; + yuvData.mYChannel = const_cast<uint8_t*>(aInputImage.buffer(webrtc::kYPlane)); + yuvData.mYSize = gfx::IntSize(aInputImage.width(), aInputImage.height()); + yuvData.mYStride = aInputImage.stride(webrtc::kYPlane); + MOZ_ASSERT(aInputImage.stride(webrtc::kUPlane) == aInputImage.stride(webrtc::kVPlane)); + yuvData.mCbCrStride = aInputImage.stride(webrtc::kUPlane); + yuvData.mCbChannel = const_cast<uint8_t*>(aInputImage.buffer(webrtc::kUPlane)); + yuvData.mCrChannel = const_cast<uint8_t*>(aInputImage.buffer(webrtc::kVPlane)); + yuvData.mCbCrSize = gfx::IntSize((yuvData.mYSize.width + 1) / 2, + (yuvData.mYSize.height + 1) / 2); + yuvData.mPicSize = yuvData.mYSize; + yuvData.mStereoMode = StereoMode::MONO; + layers::RecyclingPlanarYCbCrImage img(nullptr); + // AdoptData() doesn't need AllocateAndGetNewBuffer(); OMXVideoEncoder is ok with this + img.AdoptData(yuvData); + + CODEC_LOGD("Encode frame: %dx%d, timestamp %u (%lld), renderTimeMs %" PRIu64, + aInputImage.width(), aInputImage.height(), + aInputImage.timestamp(), aInputImage.timestamp() * 1000ll / 90, + aInputImage.render_time_ms()); + + nsresult rv = mOMX->Encode(&img, + yuvData.mYSize.width, + yuvData.mYSize.height, + aInputImage.timestamp() * 1000ll / 90, // 90kHz -> us. + 0); + if (rv == NS_OK) { + if (mOutputDrain == nullptr) { + mOutputDrain = new EncOutputDrain(mOMX, mCallback); + mOutputDrain->Start(); + } + EncodedFrame frame; + frame.mWidth = mWidth; + frame.mHeight = mHeight; + frame.mTimestamp = aInputImage.timestamp(); + frame.mRenderTimeMs = aInputImage.render_time_ms(); + mOutputDrain->QueueInput(frame); + } + + return (rv == NS_OK) ? WEBRTC_VIDEO_CODEC_OK : WEBRTC_VIDEO_CODEC_ERROR; +} + +int32_t +WebrtcOMXH264VideoEncoder::RegisterEncodeCompleteCallback( + webrtc::EncodedImageCallback* aCallback) +{ + CODEC_LOGD("WebrtcOMXH264VideoEncoder:%p set callback:%p", this, aCallback); + MOZ_ASSERT(aCallback); + mCallback = aCallback; + + return WEBRTC_VIDEO_CODEC_OK; +} + +int32_t +WebrtcOMXH264VideoEncoder::Release() +{ + CODEC_LOGD("WebrtcOMXH264VideoEncoder:%p will be released", this); + + if (mOutputDrain != nullptr) { + mOutputDrain->Stop(); + mOutputDrain = nullptr; + } + mOMXConfigured = false; + bool hadOMX = !!mOMX; + mOMX = nullptr; + if (hadOMX) { + mReservation->ReleaseOMXCodec(); + } + CODEC_LOGD("WebrtcOMXH264VideoEncoder:%p released", this); + + return WEBRTC_VIDEO_CODEC_OK; +} + +WebrtcOMXH264VideoEncoder::~WebrtcOMXH264VideoEncoder() +{ + CODEC_LOGD("WebrtcOMXH264VideoEncoder:%p will be destructed", this); + + Release(); +} + +// Inform the encoder of the new packet loss rate and the round-trip time of +// the network. aPacketLossRate is fraction lost and can be 0~255 +// (255 means 100% lost). +// Note: stagefright doesn't handle these parameters. +int32_t +WebrtcOMXH264VideoEncoder::SetChannelParameters(uint32_t aPacketLossRate, + int64_t aRoundTripTimeMs) +{ + CODEC_LOGD("WebrtcOMXH264VideoEncoder:%p set channel packet loss:%u, rtt:%" PRIi64, + this, aPacketLossRate, aRoundTripTimeMs); + + return WEBRTC_VIDEO_CODEC_OK; +} + +// TODO: Bug 997567. Find the way to support frame rate change. 
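The SetRates() implementation that follows quantizes the requested frame rate into a few encoder-friendly steps (30/20/15/10 fps), reacting only when the request rises above the configured rate or falls below half of it. Extracted into a standalone helper, the logic looks like this (a hypothetical refactoring for illustration; StepFrameRate does not exist in the file):

#include <cstdint>
#include <cstdio>

// Mirror of the step logic in SetRates(): keep the configured rate while the
// request stays within [configured/2, configured], otherwise snap to a step,
// never going below 10 fps and never below the requested rate itself.
static uint32_t StepFrameRate(uint32_t configured, uint32_t requested) {
  if (requested <= configured && requested >= configured / 2) {
    return configured;                       // within tolerance: no reconfigure
  }
  uint32_t stepped = requested >= 15 ? 30
                   : requested >= 10 ? 20
                   : requested >= 8  ? 15
                   : 10;                     // encoder may not be stable lower
  return stepped < requested ? requested : stepped;  // safety clamp
}

int main() {
  printf("%u %u %u\n",
         StepFrameRate(30, 24),   // 24 is within [15, 30] -> stays 30
         StepFrameRate(30, 12),   // below half -> steps down to 20
         StepFrameRate(30, 31));  // above configured -> clamps up to 31
  return 0;
}

Only when the stepped value differs from the previous one does the real code set mOMXReconfigure, forcing a reconfigure on the next frame.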
+int32_t
+WebrtcOMXH264VideoEncoder::SetRates(uint32_t aBitRateKbps, uint32_t aFrameRate)
+{
+  CODEC_LOGE("WebrtcOMXH264VideoEncoder:%p set bitrate:%u, frame rate:%u (%u)",
+             this, aBitRateKbps, aFrameRate, mFrameRate);
+  MOZ_ASSERT(mOMX != nullptr);
+  if (mOMX == nullptr) {
+    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+  }
+
+  // XXX Should use the Stagefright framerate change, perhaps only on major changes of framerate.
+
+  // Without Stagefright support, the algorithm should be:
+  // if (frameRate < 50% of configured) {
+  //   drop framerate to next step down that includes current framerate within 50%
+  // } else if (frameRate > configured) {
+  //   change config to next step up that includes current framerate
+  // }
+#if !defined(TEST_OMX_FRAMERATE_CHANGES)
+  if (aFrameRate > mFrameRate ||
+      aFrameRate < mFrameRate/2) {
+    uint32_t old_rate = mFrameRate;
+    if (aFrameRate >= 15) {
+      mFrameRate = 30;
+    } else if (aFrameRate >= 10) {
+      mFrameRate = 20;
+    } else if (aFrameRate >= 8) {
+      mFrameRate = 15;
+    } else /* if (aFrameRate >= 5) */ {
+      // don't go lower; the encoder may not be stable
+      mFrameRate = 10;
+    }
+    if (mFrameRate < aFrameRate) { // safety
+      mFrameRate = aFrameRate;
+    }
+    if (old_rate != mFrameRate) {
+      mOMXReconfigure = true;  // force re-configure on next frame
+    }
+  }
+#else
+  // XXX for testing, be wild!
+  if (aFrameRate != mFrameRate) {
+    mFrameRate = aFrameRate;
+    mOMXReconfigure = true;  // force re-configure on next frame
+  }
+#endif
+
+  // XXX Limit bitrate for 8x10 devices to a specific level depending on fps and resolution.
+  // mBitRateKbps = LimitBitrate8x10(mWidth, mHeight, mFrameRate, aBitRateKbps);
+  // Rely on a global single setting (~700 kbps for HVGA@30fps) for now.
+  if (aBitRateKbps > 700) {
+    aBitRateKbps = 700;
+  }
+  mBitRateKbps = aBitRateKbps;
+  nsresult rv = mOMX->SetBitrate(mBitRateKbps);
+  NS_WARNING_ASSERTION(NS_SUCCEEDED(rv), "SetBitrate failed");
+  return NS_FAILED(rv) ? WEBRTC_VIDEO_CODEC_ERROR : WEBRTC_VIDEO_CODEC_OK;
+}
+
+// Decoder.
+WebrtcOMXH264VideoDecoder::WebrtcOMXH264VideoDecoder()
+  : mCallback(nullptr)
+  , mOMX(nullptr)
+{
+  mReservation = new OMXCodecReservation(false);
+  CODEC_LOGD("WebrtcOMXH264VideoDecoder:%p will be constructed", this);
+}
+
+int32_t
+WebrtcOMXH264VideoDecoder::InitDecode(const webrtc::VideoCodec* aCodecSettings,
+                                      int32_t aNumOfCores)
+{
+  CODEC_LOGD("WebrtcOMXH264VideoDecoder:%p init OMX:%p", this, mOMX.get());
+
+  if (!mReservation->ReserveOMXCodec()) {
+    CODEC_LOGD("WebrtcOMXH264VideoDecoder:%p Decoder in use", this);
+    return WEBRTC_VIDEO_CODEC_ERROR;
+  }
+
+  // Defer configuration until SPS/PPS NALUs (where actual decoder config
+  // values can be extracted) are received.
+
+  CODEC_LOGD("WebrtcOMXH264VideoDecoder:%p OMX Decoder reserved", this);
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t
+WebrtcOMXH264VideoDecoder::Decode(const webrtc::EncodedImage& aInputImage,
+                                  bool aMissingFrames,
+                                  const webrtc::RTPFragmentationHeader* aFragmentation,
+                                  const webrtc::CodecSpecificInfo* aCodecSpecificInfo,
+                                  int64_t aRenderTimeMs)
+{
+  if (aInputImage._length == 0 || !aInputImage._buffer) {
+    return WEBRTC_VIDEO_CODEC_ERROR;
+  }
+
+  bool configured = !!mOMX;
+  if (!configured) {
+    // Search for an SPS NALU in the input to get the width/height config.
+    int32_t width;
+    int32_t height;
+    status_t result = WebrtcOMXDecoder::ExtractPicDimensions(aInputImage._buffer,
+                                                             aInputImage._length,
+                                                             &width, &height);
+    if (result != OK) {
+      // Cannot configure the decoder because no SPS has been seen yet.
+ CODEC_LOGI("WebrtcOMXH264VideoDecoder:%p missing SPS in input (nal 0x%02x, len %d)", + this, aInputImage._buffer[sizeof(kNALStartCode)] & 0x1f, aInputImage._length); + return WEBRTC_VIDEO_CODEC_UNINITIALIZED; + } + RefPtr<WebrtcOMXDecoder> omx = new WebrtcOMXDecoder(MEDIA_MIMETYPE_VIDEO_AVC, + mCallback); + result = omx->ConfigureWithPicDimensions(width, height); + if (NS_WARN_IF(result != OK)) { + return WEBRTC_VIDEO_CODEC_UNINITIALIZED; + } + CODEC_LOGD("WebrtcOMXH264VideoDecoder:%p start OMX", this); + mOMX = omx; + } + + bool feedFrame = true; + while (feedFrame) { + status_t err = mOMX->FillInput(aInputImage, !configured, aRenderTimeMs); + feedFrame = (err == -EAGAIN); // No input buffer available. Try again. + } + + return WEBRTC_VIDEO_CODEC_OK; +} + +int32_t +WebrtcOMXH264VideoDecoder::RegisterDecodeCompleteCallback(webrtc::DecodedImageCallback* aCallback) +{ + CODEC_LOGD("WebrtcOMXH264VideoDecoder:%p set callback:%p", this, aCallback); + MOZ_ASSERT(aCallback); + mCallback = aCallback; + + return WEBRTC_VIDEO_CODEC_OK; +} + +int32_t +WebrtcOMXH264VideoDecoder::Release() +{ + CODEC_LOGD("WebrtcOMXH264VideoDecoder:%p will be released", this); + + mOMX = nullptr; // calls Stop() + mReservation->ReleaseOMXCodec(); + + return WEBRTC_VIDEO_CODEC_OK; +} + +WebrtcOMXH264VideoDecoder::~WebrtcOMXH264VideoDecoder() +{ + CODEC_LOGD("WebrtcOMXH264VideoDecoder:%p will be destructed", this); + Release(); +} + +int32_t +WebrtcOMXH264VideoDecoder::Reset() +{ + CODEC_LOGW("WebrtcOMXH264VideoDecoder::Reset() will NOT reset decoder"); + return WEBRTC_VIDEO_CODEC_OK; +} + +} diff --git a/media/webrtc/signaling/src/media-conduit/WebrtcOMXH264VideoCodec.h b/media/webrtc/signaling/src/media-conduit/WebrtcOMXH264VideoCodec.h new file mode 100644 index 000000000..71cf5c681 --- /dev/null +++ b/media/webrtc/signaling/src/media-conduit/WebrtcOMXH264VideoCodec.h @@ -0,0 +1,108 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef WEBRTC_GONK +#pragma error WebrtcOMXH264VideoCodec works only on B2G. +#endif + +#ifndef WEBRTC_OMX_H264_CODEC_H_ +#define WEBRTC_OMX_H264_CODEC_H_ + +#include "AudioConduit.h" +#include "VideoConduit.h" +#include <foundation/ABase.h> +#include <utils/RefBase.h> +#include "OMXCodecWrapper.h" + +namespace android { + class OMXVideoEncoder; +} + +namespace mozilla { + +class WebrtcOMXDecoder; +class OMXOutputDrain; + +// XXX see if we can reduce this +#define WEBRTC_OMX_H264_MIN_DECODE_BUFFERS 10 +#define OMX_IDR_NEEDED_FOR_BITRATE 0 + +class WebrtcOMXH264VideoEncoder : public WebrtcVideoEncoder +{ +public: + WebrtcOMXH264VideoEncoder(); + + virtual ~WebrtcOMXH264VideoEncoder(); + + // Implement VideoEncoder interface. 
+ virtual uint64_t PluginID() const override { return 0; } + + virtual int32_t InitEncode(const webrtc::VideoCodec* aCodecSettings, + int32_t aNumOfCores, + size_t aMaxPayloadSize) override; + + virtual int32_t Encode(const webrtc::I420VideoFrame& aInputImage, + const webrtc::CodecSpecificInfo* aCodecSpecificInfo, + const std::vector<webrtc::VideoFrameType>* aFrameTypes) override; + + virtual int32_t RegisterEncodeCompleteCallback(webrtc::EncodedImageCallback* aCallback) override; + + virtual int32_t Release() override; + + virtual int32_t SetChannelParameters(uint32_t aPacketLossRate, + int64_t aRoundTripTimeMs) override; + + virtual int32_t SetRates(uint32_t aBitRate, uint32_t aFrameRate) override; + +private: + nsAutoPtr<android::OMXVideoEncoder> mOMX; + android::sp<android::OMXCodecReservation> mReservation; + + webrtc::EncodedImageCallback* mCallback; + RefPtr<OMXOutputDrain> mOutputDrain; + uint32_t mWidth; + uint32_t mHeight; + uint32_t mFrameRate; + uint32_t mBitRateKbps; +#ifdef OMX_IDR_NEEDED_FOR_BITRATE + uint32_t mBitRateAtLastIDR; + TimeStamp mLastIDRTime; +#endif + bool mOMXConfigured; + bool mOMXReconfigure; + webrtc::EncodedImage mEncodedImage; +}; + +class WebrtcOMXH264VideoDecoder : public WebrtcVideoDecoder +{ +public: + WebrtcOMXH264VideoDecoder(); + + virtual ~WebrtcOMXH264VideoDecoder(); + + // Implement VideoDecoder interface. + virtual uint64_t PluginID() const override { return 0; } + + virtual int32_t InitDecode(const webrtc::VideoCodec* aCodecSettings, + int32_t aNumOfCores) override; + virtual int32_t Decode(const webrtc::EncodedImage& aInputImage, + bool aMissingFrames, + const webrtc::RTPFragmentationHeader* aFragmentation, + const webrtc::CodecSpecificInfo* aCodecSpecificInfo = nullptr, + int64_t aRenderTimeMs = -1) override; + virtual int32_t RegisterDecodeCompleteCallback(webrtc::DecodedImageCallback* callback) override; + + virtual int32_t Release() override; + + virtual int32_t Reset() override; + +private: + webrtc::DecodedImageCallback* mCallback; + RefPtr<WebrtcOMXDecoder> mOMX; + android::sp<android::OMXCodecReservation> mReservation; +}; + +} + +#endif // WEBRTC_OMX_H264_CODEC_H_ |