summaryrefslogtreecommitdiffstats
path: root/media/webrtc/signaling
diff options
context:
space:
mode:
Diffstat (limited to 'media/webrtc/signaling')
-rw-r--r--media/webrtc/signaling/signaling.gyp379
-rw-r--r--media/webrtc/signaling/src/common/CommonTypes.h63
-rw-r--r--media/webrtc/signaling/src/common/EncodingConstraints.h58
-rwxr-xr-xmedia/webrtc/signaling/src/common/MediaEngineWrapper.h39
-rw-r--r--media/webrtc/signaling/src/common/NullDeleter.h16
-rw-r--r--media/webrtc/signaling/src/common/NullTransport.h44
-rw-r--r--media/webrtc/signaling/src/common/PtrVector.h43
-rw-r--r--media/webrtc/signaling/src/common/Wrapper.h175
-rw-r--r--media/webrtc/signaling/src/common/YuvStamper.cpp469
-rw-r--r--media/webrtc/signaling/src/common/YuvStamper.h83
-rw-r--r--media/webrtc/signaling/src/common/browser_logging/CSFLog.cpp103
-rw-r--r--media/webrtc/signaling/src/common/browser_logging/CSFLog.h50
-rw-r--r--media/webrtc/signaling/src/common/browser_logging/WebRtcLog.cpp258
-rw-r--r--media/webrtc/signaling/src/common/browser_logging/WebRtcLog.h16
-rw-r--r--media/webrtc/signaling/src/common/csf_common.h79
-rw-r--r--media/webrtc/signaling/src/common/time_profiling/timecard.c125
-rw-r--r--media/webrtc/signaling/src/common/time_profiling/timecard.h81
-rw-r--r--media/webrtc/signaling/src/jsep/JsepCodecDescription.h780
-rw-r--r--media/webrtc/signaling/src/jsep/JsepSession.h243
-rw-r--r--media/webrtc/signaling/src/jsep/JsepSessionImpl.cpp2497
-rw-r--r--media/webrtc/signaling/src/jsep/JsepSessionImpl.h352
-rw-r--r--media/webrtc/signaling/src/jsep/JsepTrack.cpp531
-rw-r--r--media/webrtc/signaling/src/jsep/JsepTrack.h292
-rw-r--r--media/webrtc/signaling/src/jsep/JsepTrackEncoding.h60
-rw-r--r--media/webrtc/signaling/src/jsep/JsepTransport.h116
-rwxr-xr-xmedia/webrtc/signaling/src/media-conduit/AudioConduit.cpp1134
-rwxr-xr-xmedia/webrtc/signaling/src/media-conduit/AudioConduit.h304
-rwxr-xr-xmedia/webrtc/signaling/src/media-conduit/CodecConfig.h166
-rw-r--r--media/webrtc/signaling/src/media-conduit/CodecStatistics.cpp183
-rw-r--r--media/webrtc/signaling/src/media-conduit/CodecStatistics.h111
-rw-r--r--media/webrtc/signaling/src/media-conduit/GmpVideoCodec.cpp18
-rw-r--r--media/webrtc/signaling/src/media-conduit/GmpVideoCodec.h19
-rw-r--r--media/webrtc/signaling/src/media-conduit/MediaCodecVideoCodec.cpp31
-rw-r--r--media/webrtc/signaling/src/media-conduit/MediaCodecVideoCodec.h31
-rwxr-xr-xmedia/webrtc/signaling/src/media-conduit/MediaConduitErrors.h48
-rwxr-xr-xmedia/webrtc/signaling/src/media-conduit/MediaConduitInterface.h495
-rw-r--r--media/webrtc/signaling/src/media-conduit/OMXVideoCodec.cpp30
-rw-r--r--media/webrtc/signaling/src/media-conduit/OMXVideoCodec.h32
-rw-r--r--media/webrtc/signaling/src/media-conduit/RunningStat.h66
-rwxr-xr-xmedia/webrtc/signaling/src/media-conduit/VideoConduit.cpp2129
-rwxr-xr-xmedia/webrtc/signaling/src/media-conduit/VideoConduit.h429
-rwxr-xr-xmedia/webrtc/signaling/src/media-conduit/VideoTypes.h62
-rw-r--r--media/webrtc/signaling/src/media-conduit/WebrtcGmpVideoCodec.cpp965
-rw-r--r--media/webrtc/signaling/src/media-conduit/WebrtcGmpVideoCodec.h528
-rw-r--r--media/webrtc/signaling/src/media-conduit/WebrtcMediaCodecVP8VideoCodec.cpp1004
-rw-r--r--media/webrtc/signaling/src/media-conduit/WebrtcMediaCodecVP8VideoCodec.h114
-rw-r--r--media/webrtc/signaling/src/media-conduit/WebrtcOMXH264VideoCodec.cpp1253
-rw-r--r--media/webrtc/signaling/src/media-conduit/WebrtcOMXH264VideoCodec.h108
-rw-r--r--media/webrtc/signaling/src/media/CSFAudioControlWrapper.cpp149
-rw-r--r--media/webrtc/signaling/src/media/CSFAudioControlWrapper.h42
-rw-r--r--media/webrtc/signaling/src/media/CSFAudioTermination.h117
-rw-r--r--media/webrtc/signaling/src/media/CSFMediaProvider.h54
-rw-r--r--media/webrtc/signaling/src/media/CSFMediaTermination.h55
-rw-r--r--media/webrtc/signaling/src/media/CSFToneDefinitions.h137
-rw-r--r--media/webrtc/signaling/src/media/CSFVideoCallMediaControl.h28
-rw-r--r--media/webrtc/signaling/src/media/CSFVideoControlWrapper.h48
-rw-r--r--media/webrtc/signaling/src/media/CSFVideoTermination.h36
-rw-r--r--media/webrtc/signaling/src/media/cip_mmgr_mediadefinitions.h125
-rw-r--r--media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp2377
-rw-r--r--media/webrtc/signaling/src/mediapipeline/MediaPipeline.h479
-rw-r--r--media/webrtc/signaling/src/mediapipeline/MediaPipelineFilter.cpp97
-rw-r--r--media/webrtc/signaling/src/mediapipeline/MediaPipelineFilter.h86
-rw-r--r--media/webrtc/signaling/src/mediapipeline/SrtpFlow.cpp251
-rw-r--r--media/webrtc/signaling/src/mediapipeline/SrtpFlow.h68
-rw-r--r--media/webrtc/signaling/src/peerconnection/MediaPipelineFactory.cpp1076
-rw-r--r--media/webrtc/signaling/src/peerconnection/MediaPipelineFactory.h82
-rw-r--r--media/webrtc/signaling/src/peerconnection/MediaStreamList.cpp104
-rw-r--r--media/webrtc/signaling/src/peerconnection/MediaStreamList.h54
-rw-r--r--media/webrtc/signaling/src/peerconnection/PeerConnectionCtx.cpp452
-rw-r--r--media/webrtc/signaling/src/peerconnection/PeerConnectionCtx.h109
-rw-r--r--media/webrtc/signaling/src/peerconnection/PeerConnectionImpl.cpp4176
-rw-r--r--media/webrtc/signaling/src/peerconnection/PeerConnectionImpl.h894
-rw-r--r--media/webrtc/signaling/src/peerconnection/PeerConnectionMedia.cpp1672
-rw-r--r--media/webrtc/signaling/src/peerconnection/PeerConnectionMedia.h586
-rw-r--r--media/webrtc/signaling/src/peerconnection/WebrtcGlobalChild.h40
-rw-r--r--media/webrtc/signaling/src/peerconnection/WebrtcGlobalInformation.cpp1241
-rw-r--r--media/webrtc/signaling/src/peerconnection/WebrtcGlobalInformation.h56
-rw-r--r--media/webrtc/signaling/src/peerconnection/WebrtcGlobalParent.h53
-rw-r--r--media/webrtc/signaling/src/sdp/Sdp.h195
-rw-r--r--media/webrtc/signaling/src/sdp/SdpAttribute.cpp1674
-rw-r--r--media/webrtc/signaling/src/sdp/SdpAttribute.h1788
-rw-r--r--media/webrtc/signaling/src/sdp/SdpAttributeList.h94
-rw-r--r--media/webrtc/signaling/src/sdp/SdpEnum.h70
-rw-r--r--media/webrtc/signaling/src/sdp/SdpErrorHolder.h50
-rw-r--r--media/webrtc/signaling/src/sdp/SdpHelper.cpp811
-rw-r--r--media/webrtc/signaling/src/sdp/SdpHelper.h131
-rw-r--r--media/webrtc/signaling/src/sdp/SdpMediaSection.cpp196
-rw-r--r--media/webrtc/signaling/src/sdp/SdpMediaSection.h361
-rw-r--r--media/webrtc/signaling/src/sdp/SipccSdp.cpp180
-rw-r--r--media/webrtc/signaling/src/sdp/SipccSdp.h88
-rw-r--r--media/webrtc/signaling/src/sdp/SipccSdpAttributeList.cpp1413
-rw-r--r--media/webrtc/signaling/src/sdp/SipccSdpAttributeList.h147
-rw-r--r--media/webrtc/signaling/src/sdp/SipccSdpMediaSection.cpp423
-rw-r--r--media/webrtc/signaling/src/sdp/SipccSdpMediaSection.h102
-rw-r--r--media/webrtc/signaling/src/sdp/SipccSdpParser.cpp83
-rw-r--r--media/webrtc/signaling/src/sdp/SipccSdpParser.h35
-rw-r--r--media/webrtc/signaling/src/sdp/sipcc/ccsdp.h207
-rw-r--r--media/webrtc/signaling/src/sdp/sipcc/ccsdp_rtcp_fb.h63
-rw-r--r--media/webrtc/signaling/src/sdp/sipcc/cpr_darwin_types.h68
-rw-r--r--media/webrtc/signaling/src/sdp/sipcc/cpr_linux_types.h82
-rw-r--r--media/webrtc/signaling/src/sdp/sipcc/cpr_string.c272
-rw-r--r--media/webrtc/signaling/src/sdp/sipcc/cpr_string.h139
-rw-r--r--media/webrtc/signaling/src/sdp/sipcc/cpr_strings.h22
-rw-r--r--media/webrtc/signaling/src/sdp/sipcc/cpr_types.h126
-rw-r--r--media/webrtc/signaling/src/sdp/sipcc/cpr_win_types.h71
-rw-r--r--media/webrtc/signaling/src/sdp/sipcc/sdp.h1794
-rw-r--r--media/webrtc/signaling/src/sdp/sipcc/sdp_access.c2083
-rw-r--r--media/webrtc/signaling/src/sdp/sipcc/sdp_attr.c5120
-rw-r--r--media/webrtc/signaling/src/sdp/sipcc/sdp_attr_access.c6372
-rw-r--r--media/webrtc/signaling/src/sdp/sipcc/sdp_base64.c403
-rw-r--r--media/webrtc/signaling/src/sdp/sipcc/sdp_base64.h42
-rw-r--r--media/webrtc/signaling/src/sdp/sipcc/sdp_config.c241
-rw-r--r--media/webrtc/signaling/src/sdp/sipcc/sdp_main.c1342
-rw-r--r--media/webrtc/signaling/src/sdp/sipcc/sdp_os_defs.h27
-rw-r--r--media/webrtc/signaling/src/sdp/sipcc/sdp_private.h364
-rw-r--r--media/webrtc/signaling/src/sdp/sipcc/sdp_services_unix.c41
-rw-r--r--media/webrtc/signaling/src/sdp/sipcc/sdp_services_win32.c40
-rw-r--r--media/webrtc/signaling/src/sdp/sipcc/sdp_token.c1812
-rw-r--r--media/webrtc/signaling/src/sdp/sipcc/sdp_utils.c781
-rw-r--r--media/webrtc/signaling/test/FakeIPC.cpp35
-rw-r--r--media/webrtc/signaling/test/FakeIPC.h22
-rw-r--r--media/webrtc/signaling/test/FakeLogging.h26
-rw-r--r--media/webrtc/signaling/test/FakeMediaStreams.h656
-rw-r--r--media/webrtc/signaling/test/FakeMediaStreamsImpl.h236
-rw-r--r--media/webrtc/signaling/test/FakePCObserver.h112
-rw-r--r--media/webrtc/signaling/test/common.build134
-rw-r--r--media/webrtc/signaling/test/jsep_session_unittest.cpp4235
-rw-r--r--media/webrtc/signaling/test/jsep_track_unittest.cpp1269
-rw-r--r--media/webrtc/signaling/test/mediaconduit_unittests.cpp1091
-rw-r--r--media/webrtc/signaling/test/mediapipeline_unittest.cpp720
-rw-r--r--media/webrtc/signaling/test/moz.build33
-rw-r--r--media/webrtc/signaling/test/sdp_file_parser.cpp85
-rw-r--r--media/webrtc/signaling/test/sdp_unittests.cpp5377
-rw-r--r--media/webrtc/signaling/test/signaling_unittests.cpp4851
134 files changed, 79191 insertions, 0 deletions
diff --git a/media/webrtc/signaling/signaling.gyp b/media/webrtc/signaling/signaling.gyp
new file mode 100644
index 000000000..ee1941151
--- /dev/null
+++ b/media/webrtc/signaling/signaling.gyp
@@ -0,0 +1,379 @@
+# Copyright (c) 2011, The WebRTC project authors. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+#
+# * Neither the name of Google nor the names of its contributors may
+# be used to endorse or promote products derived from this software
+# without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Indent 2 spaces, no tabs.
+#
+#
+# signaling.gyp - a library for WebRTC signaling
+#
+
+{
+ 'variables': {
+ 'chromium_code': 1,
+ },
+
+ 'target_defaults': {
+ 'conditions': [
+ ['moz_widget_toolkit_gonk==1', {
+ 'defines' : [
+ 'WEBRTC_GONK',
+ ],
+ }],
+ ],
+ },
+
+ 'targets': [
+
+ #
+ # ECC
+ #
+ {
+ 'target_name': 'ecc',
+ 'type': 'static_library',
+
+ #
+ # INCLUDES
+ #
+ 'include_dirs': [
+ '..',
+ './src',
+ './src/common',
+ './src/common/browser_logging',
+ './src/common/time_profiling',
+ './src/media',
+ './src/media-conduit',
+ './src/mediapipeline',
+ './src/peerconnection',
+ './src/sdp/sipcc',
+ '../../../dom/base',
+ '../../../dom/media',
+ '../../../media/mtransport',
+ '../trunk',
+ '../../libyuv/include',
+ '../../mtransport/third_party/nrappkit/src/util/libekr',
+ ],
+
+ #
+ # DEPENDENCIES
+ #
+ 'dependencies': [
+ ],
+
+ 'export_dependent_settings': [
+ ],
+
+
+ #
+ # SOURCES
+ #
+ 'sources': [
+ # Media Conduit
+ './src/media-conduit/AudioConduit.h',
+ './src/media-conduit/AudioConduit.cpp',
+ './src/media-conduit/VideoConduit.h',
+ './src/media-conduit/VideoConduit.cpp',
+ './src/media-conduit/CodecStatistics.h',
+ './src/media-conduit/CodecStatistics.cpp',
+ './src/media-conduit/RunningStat.h',
+ # Common
+ './src/common/CommonTypes.h',
+ './src/common/csf_common.h',
+ './src/common/NullDeleter.h',
+ './src/common/PtrVector.h',
+ './src/common/Wrapper.h',
+ './src/common/NullTransport.h',
+ './src/common/YuvStamper.cpp',
+ # Browser Logging
+ './src/common/browser_logging/CSFLog.cpp',
+ './src/common/browser_logging/CSFLog.h',
+ './src/common/browser_logging/WebRtcLog.cpp',
+ './src/common/browser_logging/WebRtcLog.h',
+      # Time Profiling
+ './src/common/time_profiling/timecard.c',
+ './src/common/time_profiling/timecard.h',
+ # PeerConnection
+ './src/peerconnection/MediaPipelineFactory.cpp',
+ './src/peerconnection/MediaPipelineFactory.h',
+ './src/peerconnection/PeerConnectionCtx.cpp',
+ './src/peerconnection/PeerConnectionCtx.h',
+ './src/peerconnection/PeerConnectionImpl.cpp',
+ './src/peerconnection/PeerConnectionImpl.h',
+ './src/peerconnection/PeerConnectionMedia.cpp',
+ './src/peerconnection/PeerConnectionMedia.h',
+ # Media pipeline
+ './src/mediapipeline/MediaPipeline.h',
+ './src/mediapipeline/MediaPipeline.cpp',
+ './src/mediapipeline/MediaPipelineFilter.h',
+ './src/mediapipeline/MediaPipelineFilter.cpp',
+ # SDP
+ './src/sdp/sipcc/ccsdp.h',
+ './src/sdp/sipcc/cpr_string.c',
+ './src/sdp/sipcc/sdp_access.c',
+ './src/sdp/sipcc/sdp_attr.c',
+ './src/sdp/sipcc/sdp_attr_access.c',
+ './src/sdp/sipcc/sdp_base64.c',
+ './src/sdp/sipcc/sdp_config.c',
+ './src/sdp/sipcc/sdp_main.c',
+ './src/sdp/sipcc/sdp_token.c',
+ './src/sdp/sipcc/sdp.h',
+ './src/sdp/sipcc/sdp_base64.h',
+ './src/sdp/sipcc/sdp_os_defs.h',
+ './src/sdp/sipcc/sdp_private.h',
+ './src/sdp/sipcc/sdp_utils.c',
+ './src/sdp/sipcc/sdp_services_unix.c',
+
+ # SDP Wrapper
+ './src/sdp/Sdp.h',
+ './src/sdp/SdpAttribute.h',
+ './src/sdp/SdpAttribute.cpp',
+ './src/sdp/SdpAttributeList.h',
+ './src/sdp/SdpErrorHolder.h',
+ './src/sdp/SdpHelper.h',
+ './src/sdp/SdpHelper.cpp',
+ './src/sdp/SdpMediaSection.h',
+ './src/sdp/SdpMediaSection.cpp',
+ './src/sdp/SipccSdp.h',
+ './src/sdp/SipccSdpAttributeList.h',
+ './src/sdp/SipccSdpAttributeList.cpp',
+ './src/sdp/SipccSdpMediaSection.h',
+ './src/sdp/SipccSdpParser.h',
+ './src/sdp/SipccSdp.cpp',
+ './src/sdp/SipccSdpMediaSection.cpp',
+ './src/sdp/SipccSdpParser.cpp',
+
+ # JSEP
+ './src/jsep/JsepCodecDescription.h',
+ './src/jsep/JsepSession.h',
+ './src/jsep/JsepSessionImpl.cpp',
+ './src/jsep/JsepSessionImpl.h',
+ './src/jsep/JsepTrack.cpp',
+ './src/jsep/JsepTrack.h',
+ './src/jsep/JsepTrackEncoding.h',
+ './src/jsep/JsepTransport.h'
+ ],
+
+ #
+ # DEFINES
+ #
+
+ 'defines' : [
+ 'LOG4CXX_STATIC',
+ '_NO_LOG4CXX',
+ 'USE_SSLEAY',
+ '_CPR_USE_EXTERNAL_LOGGER',
+ 'WEBRTC_RELATIVE_PATH',
+ 'HAVE_WEBRTC_VIDEO',
+ 'HAVE_WEBRTC_VOICE',
+ 'HAVE_STDINT_H=1',
+ 'HAVE_STDLIB_H=1',
+ 'HAVE_UINT8_T=1',
+ 'HAVE_UINT16_T=1',
+ 'HAVE_UINT32_T=1',
+ 'HAVE_UINT64_T=1',
+ ],
+
+ 'cflags_mozilla': [
+ '$(NSPR_CFLAGS)',
+ '$(NSS_CFLAGS)',
+ '$(MOZ_PIXMAN_CFLAGS)',
+ ],
+
+
+ #
+ # Conditionals
+ #
+ 'conditions': [
+ # hack so I can change the include flow for SrtpFlow
+ ['build_with_mozilla==1', {
+ 'sources': [
+ './src/mediapipeline/SrtpFlow.h',
+ './src/mediapipeline/SrtpFlow.cpp',
+ ],
+ 'include_dirs!': [
+ '../trunk/webrtc',
+ ],
+ 'include_dirs': [
+ '../../../netwerk/srtp/src/include',
+ '../../../netwerk/srtp/src/crypto/include',
+ ],
+ }],
+ ['moz_webrtc_omx==1', {
+ 'sources': [
+ './src/media-conduit/WebrtcOMXH264VideoCodec.cpp',
+ './src/media-conduit/OMXVideoCodec.cpp',
+ ],
+ 'include_dirs': [
+ # hack on hack to re-add it after SrtpFlow removes it
+ '../../../dom/media/omx',
+ '../../../gfx/layers/client',
+ ],
+ 'cflags_mozilla': [
+ '-I$(ANDROID_SOURCE)/frameworks/av/include/media/stagefright',
+ '-I$(ANDROID_SOURCE)/frameworks/av/include',
+ '-I$(ANDROID_SOURCE)/frameworks/native/include/media/openmax',
+ '-I$(ANDROID_SOURCE)/frameworks/native/include',
+ '-I$(ANDROID_SOURCE)/frameworks/native/opengl/include',
+ ],
+ 'defines' : [
+ 'MOZ_WEBRTC_OMX'
+ ],
+ }],
+ ['moz_webrtc_mediacodec==1', {
+ 'include_dirs': [
+ '../../../widget/android',
+ ],
+ 'sources': [
+ './src/media-conduit/MediaCodecVideoCodec.h',
+ './src/media-conduit/WebrtcMediaCodecVP8VideoCodec.h',
+ './src/media-conduit/MediaCodecVideoCodec.cpp',
+ './src/media-conduit/WebrtcMediaCodecVP8VideoCodec.cpp',
+ ],
+ 'defines' : [
+ 'MOZ_WEBRTC_MEDIACODEC',
+ ],
+ }],
+ ['(build_for_test==0) and (build_for_standalone==0)', {
+ 'defines' : [
+ 'MOZILLA_INTERNAL_API',
+ ],
+ 'sources': [
+ './src/peerconnection/MediaStreamList.cpp',
+ './src/peerconnection/MediaStreamList.h',
+ './src/peerconnection/WebrtcGlobalInformation.cpp',
+ './src/peerconnection/WebrtcGlobalInformation.h',
+ ],
+ }],
+ ['build_for_test!=0', {
+ 'include_dirs': [
+ './test'
+ ],
+ 'defines' : [
+ 'NO_CHROMIUM_LOGGING',
+ 'USE_FAKE_MEDIA_STREAMS',
+ 'USE_FAKE_PCOBSERVER',
+ 'MOZILLA_EXTERNAL_LINKAGE',
+ ],
+ }],
+ ['build_for_standalone==0', {
+ 'sources': [
+ './src/media-conduit/GmpVideoCodec.cpp',
+ './src/media-conduit/WebrtcGmpVideoCodec.cpp',
+ ],
+ }],
+ ['build_for_standalone!=0', {
+ 'include_dirs': [
+ './test'
+ ],
+ 'defines' : [
+ 'MOZILLA_INTERNAL_API',
+ 'MOZILLA_EXTERNAL_LINKAGE',
+ 'NO_CHROMIUM_LOGGING',
+ 'USE_FAKE_MEDIA_STREAMS',
+ 'USE_FAKE_PCOBSERVER',
+ ],
+ }],
+ ['(OS=="linux") or (OS=="android")', {
+ 'include_dirs': [
+ ],
+
+ 'defines': [
+ 'OS_LINUX',
+ 'SIP_OS_LINUX',
+ 'WEBRTC_POSIX',
+ '_GNU_SOURCE',
+ 'LINUX',
+ 'GIPS_VER=3510',
+ 'SECLIB_OPENSSL',
+ ],
+
+ 'cflags_mozilla': [
+ ],
+ }],
+ ['OS=="android" or moz_widget_toolkit_gonk==1', {
+ 'cflags_mozilla': [
+ # This warning complains about important MOZ_EXPORT attributes
+ # on forward declarations for Android API types.
+ '-Wno-error=attributes',
+ ],
+ }],
+ ['OS=="win"', {
+ 'include_dirs': [
+ ],
+ 'defines': [
+ 'OS_WIN',
+ 'SIP_OS_WINDOWS',
+ 'WEBRTC_WIN',
+ 'WIN32',
+ 'GIPS_VER=3480',
+ 'SIPCC_BUILD',
+ 'HAVE_WINSOCK2_H'
+ ],
+
+ 'cflags_mozilla': [
+ ],
+ }],
+ ['os_bsd==1', {
+ 'include_dirs': [
+ ],
+ 'defines': [
+ # avoiding pointless ifdef churn
+ 'WEBRTC_POSIX',
+ 'SIP_OS_OSX',
+ 'OSX',
+ 'SECLIB_OPENSSL',
+ ],
+
+ 'cflags_mozilla': [
+ ],
+ }],
+ ['OS=="mac" or OS=="ios"', {
+ 'include_dirs': [
+ ],
+ 'defines': [
+ 'WEBRTC_POSIX',
+ 'OS_MACOSX',
+ 'SIP_OS_OSX',
+ 'OSX',
+ '_FORTIFY_SOURCE=2',
+ ],
+
+ 'cflags_mozilla': [
+ ],
+ }],
+ ],
+ },
+ ],
+}
+
+# Local Variables:
+# tab-width:2
+# indent-tabs-mode:nil
+# End:
+# vim: set expandtab tabstop=2 shiftwidth=2:
diff --git a/media/webrtc/signaling/src/common/CommonTypes.h b/media/webrtc/signaling/src/common/CommonTypes.h
new file mode 100644
index 000000000..96f1f3423
--- /dev/null
+++ b/media/webrtc/signaling/src/common/CommonTypes.h
@@ -0,0 +1,63 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#pragma once
+
+
+#include <string>
+
+namespace csf
+{
+
+namespace ProviderStateEnum
+{
+ enum ProviderState
+ {
+ Ready,
+ Registering,
+ AwaitingIpAddress,
+ FetchingDeviceConfig,
+ Idle,
+ RecoveryPending,
+ Connected
+ };
+ const std::string toString(ProviderState);
+}
+namespace LoginErrorStatusEnum
+{
+ enum LoginErrorStatus {
+ Ok, // No Error
+ Unknown, // Unknown Error
+ NoCallManagerConfigured, // No Primary or Backup Call Manager
+ NoDevicesFound, // No devices
+ NoCsfDevicesFound, // Devices but none of type CSF
+ PhoneConfigGenError, // Could not generate phone config
+ SipProfileGenError, // Could not build SIP profile
+ ConfigNotSet, // Config not set before calling login()
+ CreateConfigProviderFailed, // Could not create ConfigProvider
+ CreateSoftPhoneProviderFailed, // Could not create SoftPhoneProvider
+    MissingUsername,              // Username argument missing
+ ManualLogout, // logout() has been called
+ LoggedInElseWhere, // Another process has the mutex indicating it is logged in
+ AuthenticationFailure, // Authentication failure (probably bad password, but best not to say for sure)
+ CtiCouldNotConnect, // Could not connect to CTI service
+ InvalidServerSearchList
+ };
+ const std::string toString(LoginErrorStatus);
+}
+
+namespace ErrorCodeEnum
+{
+ enum ErrorCode
+ {
+ Ok,
+ Unknown,
+ InvalidState,
+ InvalidArgument
+ };
+ const std::string toString(ErrorCode);
+}
+
+} // namespace csf
+
diff --git a/media/webrtc/signaling/src/common/EncodingConstraints.h b/media/webrtc/signaling/src/common/EncodingConstraints.h
new file mode 100644
index 000000000..efba7c51c
--- /dev/null
+++ b/media/webrtc/signaling/src/common/EncodingConstraints.h
@@ -0,0 +1,58 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _ENCODING_CONSTRAINTS_H_
+#define _ENCODING_CONSTRAINTS_H_
+
+#include <algorithm>
+
+namespace mozilla
+{
+class EncodingConstraints
+{
+public:
+ EncodingConstraints() :
+ maxWidth(0),
+ maxHeight(0),
+ maxFps(0),
+ maxFs(0),
+ maxBr(0),
+ maxPps(0),
+ maxMbps(0),
+ maxCpb(0),
+ maxDpb(0),
+ scaleDownBy(1.0)
+ {}
+
+ bool operator==(const EncodingConstraints& constraints) const
+ {
+ return
+ maxWidth == constraints.maxWidth &&
+ maxHeight == constraints.maxHeight &&
+ maxFps == constraints.maxFps &&
+ maxFs == constraints.maxFs &&
+ maxBr == constraints.maxBr &&
+ maxPps == constraints.maxPps &&
+ maxMbps == constraints.maxMbps &&
+ maxCpb == constraints.maxCpb &&
+ maxDpb == constraints.maxDpb &&
+ scaleDownBy == constraints.scaleDownBy;
+ }
+
+ uint32_t maxWidth;
+ uint32_t maxHeight;
+ uint32_t maxFps;
+ uint32_t maxFs;
+ uint32_t maxBr;
+ uint32_t maxPps;
+ uint32_t maxMbps; // macroblocks per second
+ uint32_t maxCpb; // coded picture buffer size
+ uint32_t maxDpb; // decoded picture buffer size
+ double scaleDownBy; // To preserve resolution
+};
+} // namespace mozilla
+
+#endif // _ENCODING_CONSTRAINTS_H_
diff --git a/media/webrtc/signaling/src/common/MediaEngineWrapper.h b/media/webrtc/signaling/src/common/MediaEngineWrapper.h
new file mode 100755
index 000000000..f9b1a3415
--- /dev/null
+++ b/media/webrtc/signaling/src/common/MediaEngineWrapper.h
@@ -0,0 +1,39 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef MEDIA_ENGINE_WRAPPER_H_
+#define MEDIA_ENGINE_WRAPPER_H_
+
+#include <mozilla/Scoped.h>
+
+
+
+namespace mozilla
+{
+/**
+ * A custom scoped template to release a resource of type T
+ * via its Release() method, e.g.:
+ *  ScopedCustomReleasePtr<webrtc::VoENetwork> ptr =
+ *    webrtc::VoENetwork::GetInterface(voiceEngine);
+ *
+ */
+template<typename T>
+struct ScopedCustomReleaseTraits0
+{
+ typedef T* type;
+ static T* empty() { return nullptr; }
+ static void release(T* ptr)
+ {
+ if(ptr)
+ {
+ (ptr)->Release();
+ }
+ }
+};
+
+SCOPED_TEMPLATE(ScopedCustomReleasePtr, ScopedCustomReleaseTraits0)
+}//namespace
+
+
+#endif
diff --git a/media/webrtc/signaling/src/common/NullDeleter.h b/media/webrtc/signaling/src/common/NullDeleter.h
new file mode 100644
index 000000000..9b4628390
--- /dev/null
+++ b/media/webrtc/signaling/src/common/NullDeleter.h
@@ -0,0 +1,16 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#pragma once
+
+/*
+ * Helper class to allow smart pointers to stack objects to be constructed for ease of unit testing.
+ * Recycled here to help expose a shared_ptr interface to objects which are really raw pointers.
+ */
+struct null_deleter
+{
+ void operator()(void const *) const
+ {
+ }
+};
diff --git a/media/webrtc/signaling/src/common/NullTransport.h b/media/webrtc/signaling/src/common/NullTransport.h
new file mode 100644
index 000000000..bce793304
--- /dev/null
+++ b/media/webrtc/signaling/src/common/NullTransport.h
@@ -0,0 +1,44 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+
+#ifndef NULL_TRANSPORT_H_
+#define NULL_TRANSPORT_H_
+
+#include "mozilla/Attributes.h"
+
+#include "webrtc/common_types.h"
+
+namespace mozilla {
+
+/**
+ * NullTransport is registered as ExternalTransport to throw away data
+ */
+class NullTransport : public webrtc::Transport
+{
+public:
+ virtual int SendPacket(int channel, const void *data, size_t len)
+ {
+ (void) channel; (void) data;
+ return len;
+ }
+
+ virtual int SendRTCPPacket(int channel, const void *data, size_t len)
+ {
+ (void) channel; (void) data;
+ return len;
+ }
+
+ NullTransport() {}
+
+ virtual ~NullTransport() {}
+
+private:
+ NullTransport(const NullTransport& other) = delete;
+ void operator=(const NullTransport& other) = delete;
+};
+
+} // end namespace
+
+#endif
diff --git a/media/webrtc/signaling/src/common/PtrVector.h b/media/webrtc/signaling/src/common/PtrVector.h
new file mode 100644
index 000000000..68c760472
--- /dev/null
+++ b/media/webrtc/signaling/src/common/PtrVector.h
@@ -0,0 +1,43 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef PtrVector_h
+#define PtrVector_h
+
+#include <mozilla/Move.h>
+#include <vector>
+
+namespace mozilla
+{
+
+// Trivial wrapper class around a vector of ptrs.
+// TODO: Remove this once our buildconfig allows us to put unique_ptr in stl
+// containers, and just use std::vector<unique_ptr<T>> instead.
+template <class T> class PtrVector
+{
+public:
+ PtrVector() = default;
+ PtrVector(const PtrVector&) = delete;
+ PtrVector(PtrVector&& aOther)
+ : values(Move(aOther.values))
+ {}
+ PtrVector& operator=(const PtrVector&) = delete;
+ PtrVector& operator=(PtrVector&& aOther)
+ {
+ Swap(values, aOther.values);
+ return *this;
+ }
+
+ ~PtrVector()
+ {
+ for (T* value : values) { delete value; }
+ }
+
+ std::vector<T*> values;
+};
+
+} // namespace mozilla
+
+#endif // PtrVector_h
+
diff --git a/media/webrtc/signaling/src/common/Wrapper.h b/media/webrtc/signaling/src/common/Wrapper.h
new file mode 100644
index 000000000..a88cbd4bf
--- /dev/null
+++ b/media/webrtc/signaling/src/common/Wrapper.h
@@ -0,0 +1,175 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#pragma once
+
+/*
+ * Wrapper - Helper class for wrapper objects.
+ *
+ * This helps to construct a shared_ptr object which wraps access to an underlying handle.
+ * (The handle could be a pointer to some low-level type, a conventional C handle, an int ID, a GUID, etc.)
+ *
+ * Usage:
+ * To obtain a FooPtr from a foo_handle_t, call FooPtr Foo::wrap(foo_handle_t);
+ *
+ * To implement Foo using Wrapper, Foo needs to include this macro in its class definition:
+ * CSF_DECLARE_WRAP(Foo, foo_handle_t);
+ * It also needs to include this in the cpp file, to provide the wrap() implementation and define the static Wrapper.
+ * CSF_IMPLEMENT_WRAP(Foo, foo_handle_t);
+ * These are all declared in common/Wrapper.h - Foo.h needs to include this too.
+ * The client needs to declare Foo(foo_handle_t) as private, and provide a suitable implementation, as well as
+ * implementing wrappers for any other functions to be exposed.
+ * The client needs to implement ~Foo() to perform any cleanup as usual.
+ *
+ * wrap() will always return the same FooPtr for a given foo_handle_t, it will not construct additional objects
+ * if a suitable one already exists.
+ * changeHandle() is used in rare cases where the underlying handle is changed, but the wrapper object is intended
+ * to remain. This is the case for the "fake" CC_DPCall generated on CC_DPLine::CreateCall(), where
+ * the correct IDPCall* is provided later.
+ * reset() is a cleanup step to wipe the handle map and allow memory to be reclaimed.
+ *
+ * Future enhancements:
+ * - For now, objects remain in the map forever. Better would be to add a releaseHandle() function which would
+ * allow the map to be emptied as underlying handles expired. While we can't force the client to give up its
+ * shared_ptr<Foo> objects, we can remove our own copy, for instance on a call ended event.
+ */
+
+#include <map>
+#include "prlock.h"
+#include "mozilla/Assertions.h"
+
+/*
+ * Wrapper has its own autolock class because the instances are declared
+ * statically and mozilla::Mutex will not work properly when instantiated
+ * in a static constructor.
+ */
+
+class LockNSPR {
+public:
+ LockNSPR() : lock_(nullptr) {
+ lock_ = PR_NewLock();
+ MOZ_ASSERT(lock_);
+ }
+ ~LockNSPR() {
+ PR_DestroyLock(lock_);
+ }
+
+ void Acquire() {
+ PR_Lock(lock_);
+ }
+
+ void Release() {
+ PR_Unlock(lock_);
+ }
+
+private:
+ PRLock *lock_;
+};
+
+class AutoLockNSPR {
+public:
+ explicit AutoLockNSPR(LockNSPR& lock) : lock_(lock) {
+ lock_.Acquire();
+ }
+ ~AutoLockNSPR() {
+ lock_.Release();
+ }
+
+private:
+ LockNSPR& lock_;
+};
+
+template <class T>
+class Wrapper
+{
+private:
+ typedef std::map<typename T::Handle, typename T::Ptr> HandleMapType;
+ HandleMapType handleMap;
+ LockNSPR handleMapMutex;
+
+public:
+ Wrapper() {}
+
+ typename T::Ptr wrap(typename T::Handle handle)
+ {
+ AutoLockNSPR lock(handleMapMutex);
+ typename HandleMapType::iterator it = handleMap.find(handle);
+ if(it != handleMap.end())
+ {
+ return it->second;
+ }
+ else
+ {
+ typename T::Ptr p(new T(handle));
+ handleMap[handle] = p;
+ return p;
+ }
+ }
+
+ bool changeHandle(typename T::Handle oldHandle, typename T::Handle newHandle)
+ {
+ AutoLockNSPR lock(handleMapMutex);
+ typename HandleMapType::iterator it = handleMap.find(oldHandle);
+ if(it != handleMap.end())
+ {
+ typename T::Ptr p = it->second;
+ handleMap.erase(it);
+ handleMap[newHandle] = p;
+ return true;
+ }
+ else
+ {
+ return false;
+ }
+ }
+
+ bool release(typename T::Handle handle)
+ {
+ AutoLockNSPR lock(handleMapMutex);
+ typename HandleMapType::iterator it = handleMap.find(handle);
+ if(it != handleMap.end())
+ {
+ handleMap.erase(it);
+ return true;
+ }
+ else
+ {
+ return false;
+ }
+ }
+
+ void reset()
+ {
+ AutoLockNSPR lock(handleMapMutex);
+ handleMap.clear();
+ }
+};
+
+#define CSF_DECLARE_WRAP(classname, handletype) \
+ public: \
+ static classname ## Ptr wrap(handletype handle); \
+ static void reset(); \
+ static void release(handletype handle); \
+ private: \
+ friend class Wrapper<classname>; \
+ typedef classname ## Ptr Ptr; \
+ typedef handletype Handle; \
+ static Wrapper<classname>& getWrapper() { \
+ static Wrapper<classname> wrapper; \
+ return wrapper; \
+ }
+
+#define CSF_IMPLEMENT_WRAP(classname, handletype) \
+ classname ## Ptr classname::wrap(handletype handle) \
+ { \
+ return getWrapper().wrap(handle); \
+ } \
+ void classname::reset() \
+ { \
+ getWrapper().reset(); \
+ } \
+ void classname::release(handletype handle) \
+ { \
+ getWrapper().release(handle); \
+ }
diff --git a/media/webrtc/signaling/src/common/YuvStamper.cpp b/media/webrtc/signaling/src/common/YuvStamper.cpp
new file mode 100644
index 000000000..892b640bf
--- /dev/null
+++ b/media/webrtc/signaling/src/common/YuvStamper.cpp
@@ -0,0 +1,469 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifdef HAVE_NETINET_IN_H
+#include <netinet/in.h>
+#elif defined XP_WIN
+#include <winsock2.h>
+#endif
+#include <string.h>
+
+#include "nspr.h"
+#include "YuvStamper.h"
+#include "mozilla/Sprintf.h"
+
+typedef uint32_t UINT4; //Needed for r_crc32() call
+extern "C" {
+#include "r_crc32.h"
+}
+
+namespace mozilla {
+
+#define ON_5 0x20
+#define ON_4 0x10
+#define ON_3 0x08
+#define ON_2 0x04
+#define ON_1 0x02
+#define ON_0 0x01
+
+/*
+ 0, 0, 1, 1, 0, 0,
+ 0, 1, 0, 0, 1, 0,
+ 1, 0, 0, 0, 0, 1,
+ 1, 0, 0, 0, 0, 1,
+ 1, 0, 0, 0, 0, 1,
+ 0, 1, 0, 0, 1, 0,
+ 0, 0, 1, 1, 0, 0
+*/
+static unsigned char DIGIT_0 [] =
+ { ON_3 | ON_2,
+ ON_4 | ON_1,
+ ON_5 | ON_0,
+ ON_5 | ON_0,
+ ON_5 | ON_0,
+ ON_4 | ON_1,
+ ON_3 | ON_2
+ };
+
+/*
+ 0, 0, 0, 1, 0, 0,
+ 0, 0, 0, 1, 0, 0,
+ 0, 0, 0, 1, 0, 0,
+ 0, 0, 0, 1, 0, 0,
+ 0, 0, 0, 1, 0, 0,
+ 0, 0, 0, 1, 0, 0,
+ 0, 0, 0, 1, 0, 0,
+*/
+static unsigned char DIGIT_1 [] =
+ { ON_2,
+ ON_2,
+ ON_2,
+ ON_2,
+ ON_2,
+ ON_2,
+ ON_2
+ };
+
+/*
+ 1, 1, 1, 1, 1, 0,
+ 0, 0, 0, 0, 0, 1,
+ 0, 0, 0, 0, 0, 1,
+ 0, 1, 1, 1, 1, 0,
+ 1, 0, 0, 0, 0, 0,
+ 1, 0, 0, 0, 0, 0,
+ 0, 1, 1, 1, 1, 1,
+*/
+static unsigned char DIGIT_2 [] =
+ { ON_5 | ON_4 | ON_3 | ON_2 | ON_1,
+ ON_0,
+ ON_0,
+ ON_4 | ON_3 | ON_2 | ON_1,
+ ON_5,
+ ON_5,
+ ON_4 | ON_3 | ON_2 | ON_1 | ON_0,
+ };
+
+/*
+ 1, 1, 1, 1, 1, 0,
+ 0, 0, 0, 0, 0, 1,
+ 0, 0, 0, 0, 0, 1,
+ 0, 1, 1, 1, 1, 1,
+ 0, 0, 0, 0, 0, 1,
+ 0, 0, 0, 0, 0, 1,
+ 1, 1, 1, 1, 1, 0,
+*/
+static unsigned char DIGIT_3 [] =
+ { ON_5 | ON_4 | ON_3 | ON_2 | ON_1,
+ ON_0,
+ ON_0,
+ ON_4 | ON_3 | ON_2 | ON_1 | ON_0,
+ ON_0,
+ ON_0,
+ ON_5 | ON_4 | ON_3 | ON_2 | ON_1,
+ };
+
+/*
+ 0, 1, 0, 0, 0, 1,
+ 0, 1, 0, 0, 0, 1,
+ 0, 1, 0, 0, 0, 1,
+ 0, 1, 1, 1, 1, 1,
+ 0, 0, 0, 0, 0, 1,
+ 0, 0, 0, 0, 0, 1,
+ 0, 0, 0, 0, 0, 1
+*/
+static unsigned char DIGIT_4 [] =
+ { ON_4 | ON_0,
+ ON_4 | ON_0,
+ ON_4 | ON_0,
+ ON_4 | ON_3 | ON_2 | ON_1 | ON_0,
+ ON_0,
+ ON_0,
+ ON_0,
+ };
+
+/*
+ 0, 1, 1, 1, 1, 1,
+ 1, 0, 0, 0, 0, 0,
+ 1, 0, 0, 0, 0, 0,
+ 0, 1, 1, 1, 1, 0,
+ 0, 0, 0, 0, 0, 1,
+ 0, 0, 0, 0, 0, 1,
+ 1, 1, 1, 1, 1, 0,
+*/
+static unsigned char DIGIT_5 [] =
+ { ON_4 | ON_3 | ON_2 | ON_1 | ON_0,
+ ON_5,
+ ON_5,
+ ON_4 | ON_3 | ON_2 | ON_1,
+ ON_0,
+ ON_0,
+ ON_5 | ON_4 | ON_3 | ON_2 | ON_1,
+ };
+
+/*
+ 0, 1, 1, 1, 1, 1,
+ 1, 0, 0, 0, 0, 0,
+ 1, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 0,
+ 1, 0, 0, 0, 0, 1,
+ 1, 0, 0, 0, 0, 1,
+ 0, 1, 1, 1, 1, 0,
+*/
+static unsigned char DIGIT_6 [] =
+ { ON_4 | ON_3 | ON_2 | ON_1 | ON_0,
+ ON_5,
+ ON_5,
+ ON_4 | ON_3 | ON_2 | ON_1,
+ ON_5 | ON_0,
+ ON_5 | ON_0,
+ ON_4 | ON_3 | ON_2 | ON_1,
+ };
+
+/*
+ 1, 1, 1, 1, 1, 1,
+ 0, 0, 0, 0, 0, 1,
+ 0, 0, 0, 0, 1, 0,
+ 0, 0, 0, 1, 0, 0,
+ 0, 0, 1, 0, 0, 0,
+ 0, 1, 0, 0, 0, 0,
+ 1, 0, 0, 0, 0, 0
+*/
+static unsigned char DIGIT_7 [] =
+ { ON_5 | ON_4 | ON_3 | ON_2 | ON_1 | ON_0,
+ ON_0,
+ ON_1,
+ ON_2,
+ ON_3,
+ ON_4,
+ ON_5
+ };
+
+/*
+ 0, 1, 1, 1, 1, 1,
+ 1, 0, 0, 0, 0, 1,
+ 1, 0, 0, 0, 0, 1,
+ 0, 1, 1, 1, 1, 0,
+ 1, 0, 0, 0, 0, 1,
+ 1, 0, 0, 0, 0, 1,
+ 0, 1, 1, 1, 1, 0
+*/
+static unsigned char DIGIT_8 [] =
+ { ON_4 | ON_3 | ON_2 | ON_1,
+ ON_5 | ON_0,
+ ON_5 | ON_0,
+ ON_4 | ON_3 | ON_2 | ON_1,
+ ON_5 | ON_0,
+ ON_5 | ON_0,
+ ON_4 | ON_3 | ON_2 | ON_1,
+ };
+
+/*
+ 0, 1, 1, 1, 1, 1,
+ 1, 0, 0, 0, 0, 1,
+ 1, 0, 0, 0, 0, 1,
+ 0, 1, 1, 1, 1, 1,
+ 0, 0, 0, 0, 0, 1,
+ 0, 0, 0, 0, 0, 1,
+ 0, 1, 1, 1, 1, 0
+*/
+static unsigned char DIGIT_9 [] =
+ { ON_4 | ON_3 | ON_2 | ON_1 | ON_0,
+ ON_5 | ON_0,
+ ON_5 | ON_0,
+ ON_4 | ON_3 | ON_2 | ON_1 | ON_0,
+ ON_0,
+ ON_0,
+ ON_4 | ON_3 | ON_2 | ON_1,
+ };
+
+static unsigned char *DIGITS[] = {
+ DIGIT_0,
+ DIGIT_1,
+ DIGIT_2,
+ DIGIT_3,
+ DIGIT_4,
+ DIGIT_5,
+ DIGIT_6,
+ DIGIT_7,
+ DIGIT_8,
+ DIGIT_9
+};
+
+ YuvStamper::YuvStamper(unsigned char* pYData,
+ uint32_t width,
+ uint32_t height,
+ uint32_t stride,
+ uint32_t x,
+ uint32_t y,
+ unsigned char symbol_width,
+ unsigned char symbol_height):
+ pYData(pYData), mStride(stride),
+ mWidth(width), mHeight(height),
+ mSymbolWidth(symbol_width), mSymbolHeight(symbol_height),
+ mCursor(x, y) {}
+
+ bool YuvStamper::Encode(uint32_t width, uint32_t height, uint32_t stride,
+ unsigned char* pYData, unsigned char* pMsg, size_t msg_len,
+ uint32_t x, uint32_t y)
+ {
+ YuvStamper stamper(pYData, width, height, stride,
+ x, y, sBitSize, sBitSize);
+
+ // Reserve space for a checksum.
+ if (stamper.Capacity() < 8 * (msg_len + sizeof(uint32_t)))
+ {
+ return false;
+ }
+
+ bool ok = false;
+ uint32_t crc;
+ unsigned char* pCrc = reinterpret_cast<unsigned char*>(&crc);
+ r_crc32(reinterpret_cast<char*>(pMsg), (int)msg_len, &crc);
+ crc = htonl(crc);
+
+ while (msg_len-- > 0) {
+ if (!stamper.Write8(*pMsg++)) {
+ return false;
+ }
+ }
+
+ // Add checksum after the message.
+ ok = stamper.Write8(*pCrc++) &&
+ stamper.Write8(*pCrc++) &&
+ stamper.Write8(*pCrc++) &&
+ stamper.Write8(*pCrc++);
+
+ return ok;
+ }
+
+ bool YuvStamper::Decode(uint32_t width, uint32_t height, uint32_t stride,
+ unsigned char* pYData, unsigned char* pMsg, size_t msg_len,
+ uint32_t x, uint32_t y)
+ {
+ YuvStamper stamper(pYData, width, height, stride,
+ x, y, sBitSize, sBitSize);
+
+ unsigned char* ptr = pMsg;
+ size_t len = msg_len;
+ uint32_t crc, msg_crc;
+ unsigned char* pCrc = reinterpret_cast<unsigned char*>(&crc);
+
+ // Account for space reserved for the checksum
+ if (stamper.Capacity() < 8 * (len + sizeof(uint32_t))) {
+ return false;
+ }
+
+ while (len-- > 0) {
+ if(!stamper.Read8(*ptr++)) {
+ return false;
+ }
+ }
+
+ if (!(stamper.Read8(*pCrc++) &&
+ stamper.Read8(*pCrc++) &&
+ stamper.Read8(*pCrc++) &&
+ stamper.Read8(*pCrc++))) {
+ return false;
+ }
+
+ r_crc32(reinterpret_cast<char*>(pMsg), (int)msg_len, &msg_crc);
+ return crc == htonl(msg_crc);
+ }
+
+ inline uint32_t YuvStamper::Capacity()
+ {
+ // Enforce at least a symbol width and height offset from outer edges.
+ if (mCursor.y + mSymbolHeight > mHeight) {
+ return 0;
+ }
+
+ if (mCursor.x + mSymbolWidth > mWidth && !AdvanceCursor()) {
+ return 0;
+ }
+
+ // Normalize frame integral to mSymbolWidth x mSymbolHeight
+ uint32_t width = mWidth / mSymbolWidth;
+ uint32_t height = mHeight / mSymbolHeight;
+ uint32_t x = mCursor.x / mSymbolWidth;
+ uint32_t y = mCursor.y / mSymbolHeight;
+
+ return (width * height - width * y)- x;
+ }
+
+ bool YuvStamper::Write8(unsigned char value)
+ {
+ // Encode MSB to LSB.
+ unsigned char mask = 0x80;
+ while (mask) {
+ if (!WriteBit(!!(value & mask))) {
+ return false;
+ }
+ mask >>= 1;
+ }
+ return true;
+ }
+
+ bool YuvStamper::WriteBit(bool one)
+ {
+ // A bit is mapped to a mSymbolWidth x mSymbolHeight square of luma data points.
+ // Don't use ternary op.: https://bugzilla.mozilla.org/show_bug.cgi?id=1001708
+ unsigned char value;
+ if (one)
+ value = sYOn;
+ else
+ value = sYOff;
+
+ for (uint32_t y = 0; y < mSymbolHeight; y++) {
+ for (uint32_t x = 0; x < mSymbolWidth; x++) {
+ *(pYData + (mCursor.x + x) + ((mCursor.y + y) * mStride)) = value;
+ }
+ }
+
+ return AdvanceCursor();
+ }
+
+ bool YuvStamper::AdvanceCursor()
+ {
+ mCursor.x += mSymbolWidth;
+ if (mCursor.x + mSymbolWidth > mWidth) {
+ // move to the start of the next row if possible.
+ mCursor.y += mSymbolHeight;
+ if (mCursor.y + mSymbolHeight > mHeight) {
+ // end of frame, do not advance
+ mCursor.y -= mSymbolHeight;
+ mCursor.x -= mSymbolWidth;
+ return false;
+ } else {
+ mCursor.x = 0;
+ }
+ }
+
+ return true;
+ }
+
+ bool YuvStamper::Read8(unsigned char &value)
+ {
+ unsigned char octet = 0;
+ unsigned char bit = 0;
+
+ for (int i = 8; i > 0; --i) {
+ if (!ReadBit(bit)) {
+ return false;
+ }
+ octet <<= 1;
+ octet |= bit;
+ }
+
+ value = octet;
+ return true;
+ }
+
+ bool YuvStamper::ReadBit(unsigned char &bit)
+ {
+ uint32_t sum = 0;
+ for (uint32_t y = 0; y < mSymbolHeight; y++) {
+ for (uint32_t x = 0; x < mSymbolWidth; x++) {
+ sum += *(pYData + mStride * (mCursor.y + y) + mCursor.x + x);
+ }
+ }
+
+ // apply threshold to collected bit square
+ bit = (sum > (sBitThreshold * mSymbolWidth * mSymbolHeight)) ? 1 : 0;
+ return AdvanceCursor();
+ }
+
+ bool YuvStamper::WriteDigits(uint32_t value)
+ {
+ char buf[20];
+ SprintfLiteral(buf, "%.5u", value);
+ size_t size = strlen(buf);
+
+ if (Capacity() < size) {
+ return false;
+ }
+
+ for (size_t i=0; i < size; ++i) {
+ if (!WriteDigit(buf[i] - '0'))
+ return false;
+ if (!AdvanceCursor()) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ bool YuvStamper::WriteDigit(unsigned char digit) {
+ if (digit > sizeof(DIGITS)/sizeof(DIGITS[0]))
+ return false;
+
+ unsigned char *dig = DIGITS[digit];
+ for (uint32_t row = 0; row < sDigitHeight; ++row) {
+ unsigned char mask = 0x01 << (sDigitWidth - 1);
+ for (uint32_t col = 0; col < sDigitWidth; ++col, mask >>= 1) {
+ if (dig[row] & mask) {
+ for (uint32_t xx=0; xx < sPixelSize; ++xx) {
+ for (uint32_t yy=0; yy < sPixelSize; ++yy) {
+ WritePixel(pYData,
+ mCursor.x + (col * sPixelSize) + xx,
+ mCursor.y + (row * sPixelSize) + yy);
+ }
+ }
+ }
+ }
+ }
+
+ return true;
+ }
+
+ void YuvStamper::WritePixel(unsigned char *data, uint32_t x, uint32_t y) {
+ unsigned char *ptr = &data[y * mStride + x];
+ // Don't use ternary op.: https://bugzilla.mozilla.org/show_bug.cgi?id=1001708
+ if (*ptr > sLumaThreshold)
+ *ptr = sLumaMin;
+ else
+ *ptr = sLumaMax;
+ }
+
+} // namespace mozilla.
diff --git a/media/webrtc/signaling/src/common/YuvStamper.h b/media/webrtc/signaling/src/common/YuvStamper.h
new file mode 100644
index 000000000..fb2d6e466
--- /dev/null
+++ b/media/webrtc/signaling/src/common/YuvStamper.h
@@ -0,0 +1,83 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef YUV_STAMPER_H_
+#define YUV_STAMPER_H_
+
+#include "nptypes.h"
+
+namespace mozilla {
+
+class
+YuvStamper {
+public:
+ bool WriteDigits(uint32_t value);
+
+ template<typename T>
+ static bool Write(uint32_t width, uint32_t height, uint32_t stride,
+ unsigned char *pYData, const T& value,
+ uint32_t x=0, uint32_t y=0)
+ {
+ YuvStamper stamper(pYData, width, height, stride,
+ x, y,
+ (sDigitWidth + sInterDigit) * sPixelSize,
+ (sDigitHeight + sInterLine) * sPixelSize);
+ return stamper.WriteDigits(value);
+ }
+
+ static bool Encode(uint32_t width, uint32_t height, uint32_t stride,
+ unsigned char* pYData, unsigned char* pMsg, size_t msg_len,
+ uint32_t x = 0, uint32_t y = 0);
+
+ static bool Decode(uint32_t width, uint32_t height, uint32_t stride,
+ unsigned char* pYData, unsigned char* pMsg, size_t msg_len,
+ uint32_t x = 0, uint32_t y = 0);
+
+ private:
+ YuvStamper(unsigned char* pYData,
+ uint32_t width, uint32_t height, uint32_t stride,
+ uint32_t x, uint32_t y,
+ unsigned char symbol_width, unsigned char symbol_height);
+
+ bool WriteDigit(unsigned char digit);
+ void WritePixel(unsigned char* data, uint32_t x, uint32_t y);
+ uint32_t Capacity();
+ bool AdvanceCursor();
+ bool WriteBit(bool one);
+ bool Write8(unsigned char value);
+ bool ReadBit(unsigned char &value);
+ bool Read8(unsigned char &bit);
+
+ const static unsigned char sPixelSize = 3;
+ const static unsigned char sDigitWidth = 6;
+ const static unsigned char sDigitHeight = 7;
+ const static unsigned char sInterDigit = 1;
+ const static unsigned char sInterLine = 1;
+ const static uint32_t sBitSize = 4;
+ const static uint32_t sBitThreshold = 60;
+ const static unsigned char sYOn = 0x80;
+ const static unsigned char sYOff = 0;
+ const static unsigned char sLumaThreshold = 96;
+ const static unsigned char sLumaMin = 16;
+ const static unsigned char sLumaMax = 235;
+
+ unsigned char* pYData;
+ uint32_t mStride;
+ uint32_t mWidth;
+ uint32_t mHeight;
+ unsigned char mSymbolWidth;
+ unsigned char mSymbolHeight;
+
+ struct Cursor {
+ Cursor(uint32_t x, uint32_t y):
+ x(x), y(y) {}
+ uint32_t x;
+ uint32_t y;
+ } mCursor;
+};
+
+}
+
+#endif
+
diff --git a/media/webrtc/signaling/src/common/browser_logging/CSFLog.cpp b/media/webrtc/signaling/src/common/browser_logging/CSFLog.cpp
new file mode 100644
index 000000000..3d7e2d6dc
--- /dev/null
+++ b/media/webrtc/signaling/src/common/browser_logging/CSFLog.cpp
@@ -0,0 +1,103 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdarg.h>
+
+#include "CSFLog.h"
+#include "base/basictypes.h"
+
+#include <map>
+#include "prrwlock.h"
+#include "prthread.h"
+#include "nsThreadUtils.h"
+
+#include "mozilla/Logging.h"
+#include "mozilla/Sprintf.h"
+
+static PRLogModuleInfo *gLogModuleInfo = nullptr;
+
+PRLogModuleInfo *GetSignalingLogInfo()
+{
+ if (gLogModuleInfo == nullptr)
+ gLogModuleInfo = PR_NewLogModule("signaling");
+
+ return gLogModuleInfo;
+}
+
+static PRLogModuleInfo *gWebRTCLogModuleInfo = nullptr;
+
+PRLogModuleInfo *GetWebRTCLogInfo()
+{
+ if (gWebRTCLogModuleInfo == nullptr)
+ gWebRTCLogModuleInfo = PR_NewLogModule("webrtc_trace");
+
+ return gWebRTCLogModuleInfo;
+}
+
+
+void CSFLogV(CSFLogLevel priority, const char* sourceFile, int sourceLine, const char* tag , const char* format, va_list args)
+{
+#ifdef STDOUT_LOGGING
+ printf("%s\n:",tag);
+ vprintf(format, args);
+#else
+
+ mozilla::LogLevel level = static_cast<mozilla::LogLevel>(priority);
+
+ GetSignalingLogInfo();
+
+ // Skip doing any of this work if we're not logging the indicated level...
+ if (!MOZ_LOG_TEST(gLogModuleInfo,level)) {
+ return;
+ }
+
+ // Trim the path component from the filename
+ const char *lastSlash = sourceFile;
+ while (*sourceFile) {
+ if (*sourceFile == '/' || *sourceFile == '\\') {
+ lastSlash = sourceFile;
+ }
+ sourceFile++;
+ }
+ sourceFile = lastSlash;
+ if (*sourceFile == '/' || *sourceFile == '\\') {
+ sourceFile++;
+ }
+
+#define MAX_MESSAGE_LENGTH 1024
+ char message[MAX_MESSAGE_LENGTH];
+
+ const char *threadName = NULL;
+
+ // Check if we're the main thread...
+ if (NS_IsMainThread()) {
+ threadName = "main";
+ } else {
+ threadName = PR_GetThreadName(PR_GetCurrentThread());
+ }
+
+ // If we can't find it anywhere, use a blank string
+ if (!threadName) {
+ threadName = "";
+ }
+
+ VsprintfLiteral(message, format, args);
+ MOZ_LOG(gLogModuleInfo, level, ("[%s|%s] %s:%d: %s",
+ threadName, tag, sourceFile, sourceLine,
+ message));
+#endif
+
+}
+
+void CSFLog( CSFLogLevel priority, const char* sourceFile, int sourceLine, const char* tag , const char* format, ...)
+{
+ va_list ap;
+ va_start(ap, format);
+
+ CSFLogV(priority, sourceFile, sourceLine, tag, format, ap);
+ va_end(ap);
+}
+
diff --git a/media/webrtc/signaling/src/common/browser_logging/CSFLog.h b/media/webrtc/signaling/src/common/browser_logging/CSFLog.h
new file mode 100644
index 000000000..a20157992
--- /dev/null
+++ b/media/webrtc/signaling/src/common/browser_logging/CSFLog.h
@@ -0,0 +1,50 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef CSFLOG_H
+#define CSFLOG_H
+
+#include <stdarg.h>
+
+struct PRLogModuleInfo;
+
+typedef enum{
+ CSF_LOG_ERROR = 1,
+ CSF_LOG_WARNING,
+ CSF_LOG_INFO,
+ CSF_LOG_DEBUG,
+ CSF_LOG_VERBOSE,
+} CSFLogLevel;
+
+#define CSFLogError(tag , format, ...) CSFLog( CSF_LOG_ERROR, __FILE__ , __LINE__ , tag , format , ## __VA_ARGS__ )
+#define CSFLogErrorV(tag , format, va_list_arg) CSFLogV(CSF_LOG_ERROR, __FILE__ , __LINE__ , tag , format , va_list_arg )
+#define CSFLogWarn(tag , format, ...) CSFLog( CSF_LOG_WARNING, __FILE__ , __LINE__ , tag , format , ## __VA_ARGS__ )
+#define CSFLogWarnV(tag , format, va_list_arg) CSFLogV(CSF_LOG_WARNING, __FILE__ , __LINE__ , tag , format , va_list_arg )
+#define CSFLogInfo(tag , format, ...) CSFLog( CSF_LOG_INFO, __FILE__ , __LINE__ , tag , format , ## __VA_ARGS__ )
+#define CSFLogInfoV(tag , format, va_list_arg) CSFLogV(CSF_LOG_INFO, __FILE__ , __LINE__ , tag , format , va_list_arg )
+#define CSFLogDebug(tag , format, ...) CSFLog(CSF_LOG_DEBUG, __FILE__ , __LINE__ , tag , format , ## __VA_ARGS__ )
+#define CSFLogDebugV(tag , format, va_list_arg) CSFLogV(CSF_LOG_DEBUG, __FILE__ , __LINE__ , tag , format , va_list_arg )
+#define CSFLogVerbose(tag , format, ...) CSFLog(CSF_LOG_VERBOSE, __FILE__ , __LINE__ , tag , format , ## __VA_ARGS__ )
+#define CSFLogVerboseV(tag , format, va_list_arg) CSFLogV(CSF_LOG_VERBOSE, __FILE__ , __LINE__ , tag , format , va_list_arg )
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+void CSFLog( CSFLogLevel priority, const char* sourceFile, int sourceLine, const char* tag , const char* format, ...)
+#ifdef __GNUC__
+ __attribute__ ((format (printf, 5, 6)))
+#endif
+;
+
+void CSFLogV( CSFLogLevel priority, const char* sourceFile, int sourceLine, const char* tag , const char* format, va_list args);
+
+struct PRLogModuleInfo *GetSignalingLogInfo();
+struct PRLogModuleInfo *GetWebRTCLogInfo();
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/media/webrtc/signaling/src/common/browser_logging/WebRtcLog.cpp b/media/webrtc/signaling/src/common/browser_logging/WebRtcLog.cpp
new file mode 100644
index 000000000..875e0ed2c
--- /dev/null
+++ b/media/webrtc/signaling/src/common/browser_logging/WebRtcLog.cpp
@@ -0,0 +1,258 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "WebRtcLog.h"
+
+#include "mozilla/Logging.h"
+#include "prenv.h"
+#include "webrtc/system_wrappers/interface/trace.h"
+
+#include "nscore.h"
+#ifdef MOZILLA_INTERNAL_API
+#include "nsString.h"
+#include "nsXULAppAPI.h"
+#include "mozilla/Preferences.h"
+#else
+#include "nsStringAPI.h"
+#endif
+
+#include "nsIFile.h"
+#include "nsDirectoryServiceUtils.h"
+#include "nsDirectoryServiceDefs.h"
+
+using mozilla::LogLevel;
+
+static int gWebRtcTraceLoggingOn = 0;
+
+
+#if defined(ANDROID)
+static const char *default_tmp_dir = "/dev/null";
+static const char *default_log_name = "nspr";
+#else // Assume a POSIX environment
+NS_NAMED_LITERAL_CSTRING(default_log_name, "WebRTC.log");
+#endif
+
+static PRLogModuleInfo* GetWebRtcTraceLog()
+{
+ static PRLogModuleInfo *sLog;
+ if (!sLog) {
+ sLog = PR_NewLogModule("webrtc_trace");
+ }
+ return sLog;
+}
+
+static PRLogModuleInfo* GetWebRtcAECLog()
+{
+ static PRLogModuleInfo *sLog;
+ if (!sLog) {
+ sLog = PR_NewLogModule("AEC");
+ }
+ return sLog;
+}
+
+class WebRtcTraceCallback: public webrtc::TraceCallback
+{
+public:
+ void Print(webrtc::TraceLevel level, const char* message, int length)
+ {
+ PRLogModuleInfo *log = GetWebRtcTraceLog();
+ MOZ_LOG(log, LogLevel::Debug, ("%s", message));
+ }
+};
+
+static WebRtcTraceCallback gWebRtcCallback;
+
+#ifdef MOZILLA_INTERNAL_API
+void GetWebRtcLogPrefs(uint32_t *aTraceMask, nsACString* aLogFile, nsACString *aAECLogDir, bool *aMultiLog)
+{
+ *aMultiLog = mozilla::Preferences::GetBool("media.webrtc.debug.multi_log");
+ *aTraceMask = mozilla::Preferences::GetUint("media.webrtc.debug.trace_mask");
+ mozilla::Preferences::GetCString("media.webrtc.debug.log_file", aLogFile);
+ mozilla::Preferences::GetCString("media.webrtc.debug.aec_log_dir", aAECLogDir);
+ webrtc::Trace::set_aec_debug_size(mozilla::Preferences::GetUint("media.webrtc.debug.aec_dump_max_size"));
+}
+#endif
+
+void CheckOverrides(uint32_t *aTraceMask, nsACString *aLogFile, bool *aMultiLog)
+{
+ if (!aTraceMask || !aLogFile || !aMultiLog) {
+ return;
+ }
+
+ // Override or fill in attributes from the environment if possible.
+
+ PRLogModuleInfo *log_info = GetWebRtcTraceLog();
+  /* When webrtc_trace:x is not part of the NSPR_LOG_MODULES var the structure returned from
+     the GetWebRtcTraceLog call will be non-null and show a level of 0. This cannot
+ be reliably used to turn off the trace and override a log level from about:config as
+ there is no way to differentiate between NSPR_LOG_MODULES=webrtc_trace:0 and the complete
+     absence of the webrtc_trace in the environment string at all.
+ */
+ if (log_info && (log_info->level != 0)) {
+ *aTraceMask = log_info->level;
+ }
+
+ log_info = GetWebRtcAECLog();
+ if (log_info && (log_info->level != 0)) {
+ webrtc::Trace::set_aec_debug(true);
+ }
+
+ const char *file_name = PR_GetEnv("WEBRTC_TRACE_FILE");
+ if (file_name) {
+ aLogFile->Assign(file_name);
+ }
+}
+
+void ConfigWebRtcLog(uint32_t trace_mask, nsCString &aLogFile, nsCString &aAECLogDir, bool multi_log)
+{
+ if (gWebRtcTraceLoggingOn) {
+ return;
+ }
+
+#if defined(ANDROID)
+ // Special case: use callback to pipe to NSPR logging.
+ aLogFile.Assign(default_log_name);
+#else
+
+ webrtc::Trace::set_level_filter(trace_mask);
+
+ if (trace_mask != 0) {
+ if (aLogFile.EqualsLiteral("nspr")) {
+ webrtc::Trace::SetTraceCallback(&gWebRtcCallback);
+ } else {
+ webrtc::Trace::SetTraceFile(aLogFile.get(), multi_log);
+ }
+ }
+
+ if (aLogFile.IsEmpty()) {
+ nsCOMPtr<nsIFile> tempDir;
+ nsresult rv = NS_GetSpecialDirectory(NS_OS_TEMP_DIR, getter_AddRefs(tempDir));
+ if (NS_SUCCEEDED(rv)) {
+ tempDir->AppendNative(default_log_name);
+ tempDir->GetNativePath(aLogFile);
+ }
+ }
+#endif
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+ if (XRE_IsParentProcess()) {
+ // Capture the final choice for the trace setting.
+ mozilla::Preferences::SetCString("media.webrtc.debug.log_file", aLogFile);
+ }
+#endif
+ return;
+}
+
+void StartWebRtcLog(uint32_t log_level)
+{
+ if (gWebRtcTraceLoggingOn && log_level != 0) {
+ return;
+ }
+
+ if (log_level == 0) {
+ if (gWebRtcTraceLoggingOn) {
+ gWebRtcTraceLoggingOn = false;
+ webrtc::Trace::set_level_filter(webrtc::kTraceNone);
+ }
+ return;
+ }
+
+ uint32_t trace_mask = 0;
+ bool multi_log = false;
+ nsAutoCString log_file;
+ nsAutoCString aec_log_dir;
+
+#ifdef MOZILLA_INTERNAL_API
+ GetWebRtcLogPrefs(&trace_mask, &log_file, &aec_log_dir, &multi_log);
+#endif
+ CheckOverrides(&trace_mask, &log_file, &multi_log);
+
+ if (trace_mask == 0) {
+ trace_mask = log_level;
+ }
+
+ ConfigWebRtcLog(trace_mask, log_file, aec_log_dir, multi_log);
+ return;
+
+}
+
+void EnableWebRtcLog()
+{
+ if (gWebRtcTraceLoggingOn) {
+ return;
+ }
+
+ uint32_t trace_mask = 0;
+ bool multi_log = false;
+ nsAutoCString log_file;
+ nsAutoCString aec_log_dir;
+
+#ifdef MOZILLA_INTERNAL_API
+ GetWebRtcLogPrefs(&trace_mask, &log_file, &aec_log_dir, &multi_log);
+#endif
+ CheckOverrides(&trace_mask, &log_file, &multi_log);
+ ConfigWebRtcLog(trace_mask, log_file, aec_log_dir, multi_log);
+ return;
+}
+
+void StopWebRtcLog()
+{
+ // TODO(NG) strip/fix gWebRtcTraceLoggingOn which is never set to true
+ webrtc::Trace::set_level_filter(webrtc::kTraceNone);
+ webrtc::Trace::SetTraceCallback(nullptr);
+ webrtc::Trace::SetTraceFile(nullptr);
+}
+
+void ConfigAecLog(nsCString &aAECLogDir) {
+ if (webrtc::Trace::aec_debug()) {
+ return;
+ }
+#if defined(ANDROID)
+  // For AEC on Android there is no usable default directory, so fall back to /dev/null (output discarded).
+ if (aAECLogDir.IsEmpty()) {
+ aAECLogDir.Assign(default_tmp_dir);
+ }
+#else
+ if (aAECLogDir.IsEmpty()) {
+ nsCOMPtr<nsIFile> tempDir;
+ nsresult rv = NS_GetSpecialDirectory(NS_OS_TEMP_DIR, getter_AddRefs(tempDir));
+ if (NS_SUCCEEDED(rv)) {
+ if (aAECLogDir.IsEmpty()) {
+ tempDir->GetNativePath(aAECLogDir);
+ }
+ }
+ }
+#endif
+ webrtc::Trace::set_aec_debug_filename(aAECLogDir.get());
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+ if (XRE_IsParentProcess()) {
+ // Capture the final choice for the aec_log_dir setting.
+ mozilla::Preferences::SetCString("media.webrtc.debug.aec_log_dir", aAECLogDir);
+ }
+#endif
+}
+
+void StartAecLog()
+{
+ if (webrtc::Trace::aec_debug()) {
+ return;
+ }
+ uint32_t trace_mask = 0;
+ bool multi_log = false;
+ nsAutoCString log_file;
+ nsAutoCString aec_log_dir;
+
+#ifdef MOZILLA_INTERNAL_API
+ GetWebRtcLogPrefs(&trace_mask, &log_file, &aec_log_dir, &multi_log);
+#endif
+ CheckOverrides(&trace_mask, &log_file, &multi_log);
+ ConfigAecLog(aec_log_dir);
+
+ webrtc::Trace::set_aec_debug(true);
+}
+
+void StopAecLog()
+{
+ webrtc::Trace::set_aec_debug(false);
+}
diff --git a/media/webrtc/signaling/src/common/browser_logging/WebRtcLog.h b/media/webrtc/signaling/src/common/browser_logging/WebRtcLog.h
new file mode 100644
index 000000000..58a824bee
--- /dev/null
+++ b/media/webrtc/signaling/src/common/browser_logging/WebRtcLog.h
@@ -0,0 +1,16 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef WEBRTCLOG_H_
+#define WEBRTCLOG_H_
+
+#include "webrtc/common_types.h"
+
+void StartAecLog();
+void StopAecLog();
+void StartWebRtcLog(uint32_t log_level = webrtc::kTraceDefault);
+void EnableWebRtcLog();
+void StopWebRtcLog();
+
+#endif
diff --git a/media/webrtc/signaling/src/common/csf_common.h b/media/webrtc/signaling/src/common/csf_common.h
new file mode 100644
index 000000000..f46abf69e
--- /dev/null
+++ b/media/webrtc/signaling/src/common/csf_common.h
@@ -0,0 +1,79 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _CSF_COMMON_E58E5677_950A_424c_B6C2_CA180092E6A2_H
+#define _CSF_COMMON_E58E5677_950A_424c_B6C2_CA180092E6A2_H
+
+#include <assert.h>
+#include <memory>
+#include <vector>
+#include <stdlib.h>
+
+/*
+
+This header file defines:
+
+csf_countof
+csf_sprintf
+csf_vsprintf
+
+*/
+
+/*
+ General security tip: Ensure that "format" is never a user-defined string. Format should ALWAYS be something that's built into your code, not
+ user supplied. For example: never write:
+
+ csf_sprintf(buffer, csf_countof(buffer), pUserSuppliedString);
+
+ Instead write:
+
+ csf_sprintf(buffer, csf_countof(buffer), "%s", pUserSuppliedString);
+
+*/
+
+#ifdef WIN32
+ #if !defined(_countof)
+ #if !defined(__cplusplus)
+ #define _countof(_Array) (sizeof(_Array) / sizeof(_Array[0]))
+ #else
+ extern "C++"
+ {
+ template <typename _CountofType, size_t _SizeOfArray>
+ char (*_csf_countof_helper(_CountofType (&_Array)[_SizeOfArray]))[_SizeOfArray];
+ #define _countof(_Array) sizeof(*_csf_countof_helper(_Array))
+ }
+ #endif
+ #endif
+#else
+ #define _countof(_Array) (sizeof(_Array) / sizeof(_Array[0]))
+#endif
+//csf_countof
+
+#define csf_countof(anArray) _countof(anArray)
+
+//csf_sprintf
+
+#ifdef _WIN32
+ //Unlike snprintf, sprintf_s guarantees that the buffer will be null-terminated (unless the buffer size is zero).
+ #define csf_sprintf(/* char* */ buffer, /* size_t */ sizeOfBufferInCharsInclNullTerm, /* const char * */ format, ...)\
+ _snprintf_s (buffer, sizeOfBufferInCharsInclNullTerm, _TRUNCATE, format, __VA_ARGS__)
+#else
+ #define csf_sprintf(/* char */ buffer, /* size_t */ sizeOfBufferInCharsInclNullTerm, /* const char * */ format, ...)\
+ snprintf (buffer, sizeOfBufferInCharsInclNullTerm, format, __VA_ARGS__);\
+ buffer[sizeOfBufferInCharsInclNullTerm-1] = '\0'
+#endif
+
+//csf_vsprintf
+
+#ifdef _WIN32
+ #define csf_vsprintf(/* char* */ buffer, /* size_t */ sizeOfBufferInCharsInclNullTerm, /* const char * */ format, /* va_list */ vaList)\
+ vsnprintf_s (buffer, sizeOfBufferInCharsInclNullTerm, _TRUNCATE, format, vaList);\
+ buffer[sizeOfBufferInCharsInclNullTerm-1] = '\0'
+#else
+ #define csf_vsprintf(/* char */ buffer, /* size_t */ sizeOfBufferInCharsInclNullTerm, /* const char * */ format, /* va_list */ vaList)\
+ vsprintf (buffer, format, vaList);\
+ buffer[sizeOfBufferInCharsInclNullTerm-1] = '\0'
+#endif
+
+#endif
diff --git a/media/webrtc/signaling/src/common/time_profiling/timecard.c b/media/webrtc/signaling/src/common/time_profiling/timecard.c
new file mode 100644
index 000000000..e56377534
--- /dev/null
+++ b/media/webrtc/signaling/src/common/time_profiling/timecard.c
@@ -0,0 +1,125 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <stdio.h>
+#include "timecard.h"
+#include "mozilla/mozalloc.h"
+
+Timecard *
+create_timecard()
+{
+ Timecard *tc = moz_xcalloc(1,sizeof(Timecard));
+ tc->entries_allocated = TIMECARD_INITIAL_TABLE_SIZE;
+ tc->entries = moz_xcalloc(tc->entries_allocated, sizeof(TimecardEntry));
+ tc->start_time = PR_Now();
+ return tc;
+}
+
+void
+destroy_timecard(Timecard *tc)
+{
+ free(tc->entries);
+ free(tc);
+}
+
+void
+stamp_timecard(Timecard *tc,
+ const char *event,
+ const char *file,
+ unsigned int line,
+ const char *function)
+{
+ TimecardEntry *entry = NULL;
+
+ /* Trim the path component from the filename */
+ const char *last_slash = file;
+ while (*file) {
+ if (*file == '/' || *file == '\\') {
+ last_slash = file;
+ }
+ file++;
+ }
+ file = last_slash;
+ if (*file == '/' || *file == '\\') {
+ file++;
+ }
+
+ /* Ensure there is enough space left in the entries list */
+ if (tc->curr_entry == tc->entries_allocated) {
+ tc->entries_allocated *= 2;
+ tc->entries = moz_xrealloc(tc->entries,
+ tc->entries_allocated * sizeof(TimecardEntry));
+ }
+
+ /* Record the data into the timecard entry */
+ entry = &tc->entries[tc->curr_entry];
+ entry->timestamp = PR_Now();
+ entry->event = event;
+ entry->file = file;
+ entry->line = line;
+ entry->function = function;
+ tc->curr_entry++;
+}
+
+void
+print_timecard(Timecard *tc)
+{
+ size_t i;
+ TimecardEntry *entry;
+ size_t event_width = 5;
+ size_t file_width = 4;
+ size_t function_width = 8;
+ size_t line_width;
+ PRTime offset, delta;
+
+ for (i = 0; i < tc->curr_entry; i++) {
+ entry = &tc->entries[i];
+ if (strlen(entry->event) > event_width) {
+ event_width = strlen(entry->event);
+ }
+ if (strlen(entry->file) > file_width) {
+ file_width = strlen(entry->file);
+ }
+ if (strlen(entry->function) > function_width) {
+ function_width = strlen(entry->function);
+ }
+ }
+
+ printf("\nTimecard created %4ld.%6.6ld\n\n",
+ (long)(tc->start_time / PR_USEC_PER_SEC),
+ (long)(tc->start_time % PR_USEC_PER_SEC));
+
+ line_width = 1 + 11 + 11 + event_width + file_width + 6 +
+ function_width + (4 * 3);
+
+ printf(" %-11s | %-11s | %-*s | %-*s | %-*s\n",
+ "Timestamp", "Delta",
+ (int)event_width, "Event",
+ (int)file_width + 6, "File",
+ (int)function_width, "Function");
+
+ for (i = 0; i <= line_width; i++) {
+ printf("=");
+ }
+ printf("\n");
+
+ for (i = 0; i < tc->curr_entry; i++) {
+ entry = &tc->entries[i];
+ offset = entry->timestamp - tc->start_time;
+ if (i > 0) {
+ delta = entry->timestamp - tc->entries[i-1].timestamp;
+ } else {
+ delta = entry->timestamp - tc->start_time;
+ }
+ printf(" %4ld.%6.6ld | %4ld.%6.6ld | %-*s | %*s:%-5d | %-*s\n",
+ (long)(offset / PR_USEC_PER_SEC), (long)(offset % PR_USEC_PER_SEC),
+ (long)(delta / PR_USEC_PER_SEC), (long)(delta % PR_USEC_PER_SEC),
+ (int)event_width, entry->event,
+ (int)file_width, entry->file, entry->line,
+ (int)function_width, entry->function);
+ }
+ printf("\n");
+}
diff --git a/media/webrtc/signaling/src/common/time_profiling/timecard.h b/media/webrtc/signaling/src/common/time_profiling/timecard.h
new file mode 100644
index 000000000..ca53b8dfe
--- /dev/null
+++ b/media/webrtc/signaling/src/common/time_profiling/timecard.h
@@ -0,0 +1,81 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef timecard_h__
+#define timecard_h__
+
+#include <stdlib.h>
+#include "prtime.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define STAMP_TIMECARD(card,event) \
+ do { \
+ if (card) { \
+ stamp_timecard((card), (event), __FILE__, __LINE__, __FUNCTION__); \
+ } \
+ } while (0)
+
+#define TIMECARD_INITIAL_TABLE_SIZE 16
+
+/*
+ * The "const char *" members of this structure point to static strings.
+ * We do not own them, and should not attempt to deallocate them.
+ */
+
+typedef struct {
+ PRTime timestamp;
+ const char *event;
+ const char *file;
+ unsigned int line;
+ const char *function;
+} TimecardEntry;
+
+typedef struct Timecard {
+ size_t curr_entry;
+ size_t entries_allocated;
+ TimecardEntry *entries;
+ PRTime start_time;
+} Timecard;
+
+/**
+ * Creates a new Timecard structure for tracking events.
+ */
+Timecard *
+create_timecard();
+
+/**
+ * Frees the memory associated with a timecard. After returning, the
+ * timecard pointed to by tc is no longer valid.
+ */
+void
+destroy_timecard(Timecard *tc);
+
+/**
+ * Records a new event in the indicated timecard. This should not be
+ * called directly; code should instead use the STAMP_TIMECARD macro,
+ * above.
+ */
+void
+stamp_timecard(Timecard *tc,
+ const char *event,
+ const char *file,
+ unsigned int line,
+ const char *function);
+
+/**
+ * Formats and outputs the contents of a timecard onto stdout.
+ */
+void
+print_timecard(Timecard *tc);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/media/webrtc/signaling/src/jsep/JsepCodecDescription.h b/media/webrtc/signaling/src/jsep/JsepCodecDescription.h
new file mode 100644
index 000000000..6ae5c9380
--- /dev/null
+++ b/media/webrtc/signaling/src/jsep/JsepCodecDescription.h
@@ -0,0 +1,780 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _JSEPCODECDESCRIPTION_H_
+#define _JSEPCODECDESCRIPTION_H_
+
+#include <string>
+#include "signaling/src/sdp/SdpMediaSection.h"
+#include "signaling/src/sdp/SdpHelper.h"
+#include "nsCRT.h"
+
+namespace mozilla {
+
// Generates a Clone() override for concrete subclass T by invoking T's
// copy constructor. Used by each JsepCodecDescription subclass below.
#define JSEP_CODEC_CLONE(T) \
  virtual JsepCodecDescription* Clone() const override \
  { \
    return new T(*this); \
  }
+
+// A single entry in our list of known codecs.
class JsepCodecDescription {
 public:
  // |defaultPt| is the payload type we advertise when offering; Negotiate()
  // replaces it with the payload type chosen by the remote side.
  JsepCodecDescription(mozilla::SdpMediaSection::MediaType type,
                       const std::string& defaultPt,
                       const std::string& name,
                       uint32_t clock,
                       uint32_t channels,
                       bool enabled)
      : mType(type),
        mDefaultPt(defaultPt),
        mName(name),
        mClock(clock),
        mChannels(channels),
        mEnabled(enabled),
        mStronglyPreferred(false),
        mDirection(sdp::kSend)
  {
  }
  virtual ~JsepCodecDescription() {}

  virtual JsepCodecDescription* Clone() const = 0;

  // Parses mDefaultPt as a numeric payload type.
  bool
  GetPtAsInt(uint16_t* ptOutparam) const
  {
    return SdpHelper::GetPtAsInt(mDefaultPt, ptOutparam);
  }

  // Returns true when remote format |fmt| (from |remoteMsection|) describes
  // this codec: the media type, case-insensitive codec name, clock rate and
  // channel count must match, followed by a codec-specific parameter check.
  // Static payload types 9/0/8 (G722/PCMU/PCMA) are recognized even when the
  // remote SDP carries no rtpmap for them.
  virtual bool
  Matches(const std::string& fmt, const SdpMediaSection& remoteMsection) const
  {
    // note: fmt here is remote fmt (to go with remoteMsection)
    if (mType != remoteMsection.GetMediaType()) {
      return false;
    }

    const SdpRtpmapAttributeList::Rtpmap* entry(remoteMsection.FindRtpmap(fmt));

    if (entry) {
      if (!nsCRT::strcasecmp(mName.c_str(), entry->name.c_str())
          && (mClock == entry->clock)
          && (mChannels == entry->channels)) {
        return ParametersMatch(fmt, remoteMsection);
      }
    } else if (!fmt.compare("9") && mName == "G722") {
      return true;
    } else if (!fmt.compare("0") && mName == "PCMU") {
      return true;
    } else if (!fmt.compare("8") && mName == "PCMA") {
      return true;
    }
    return false;
  }

  // Base implementation accepts any parameters; subclasses refine this.
  virtual bool
  ParametersMatch(const std::string& fmt,
                  const SdpMediaSection& remoteMsection) const
  {
    return true;
  }

  // Adopts the remote's payload type for this codec.
  virtual bool
  Negotiate(const std::string& pt, const SdpMediaSection& remoteMsection)
  {
    mDefaultPt = pt;
    return true;
  }

  // Advertises this codec in |msection| if it is enabled and of the right
  // media type; codec-specific fmtp/rtcp-fb attributes are added by
  // AddParametersToMSection().
  virtual void
  AddToMediaSection(SdpMediaSection& msection) const
  {
    if (mEnabled && msection.GetMediaType() == mType) {
      // Both send and recv codec will have the same pt, so don't add twice
      if (!msection.HasFormat(mDefaultPt)) {
        if (mType == SdpMediaSection::kApplication) {
          // Hack: using mChannels for number of streams
          msection.AddDataChannel(mDefaultPt, mName, mChannels);
        } else {
          msection.AddCodec(mDefaultPt, mName, mClock, mChannels);
        }
      }

      AddParametersToMSection(msection);
    }
  }

  virtual void AddParametersToMSection(SdpMediaSection& msection) const {}

  mozilla::SdpMediaSection::MediaType mType;
  std::string mDefaultPt;   // Payload type (or sctpmap number for datachannel)
  std::string mName;        // Codec name as it appears in the rtpmap
  uint32_t mClock;          // Clock rate in Hz
  uint32_t mChannels;       // Channel count (stream count for datachannel)
  bool mEnabled;            // Disabled codecs are never added to an m-section
  bool mStronglyPreferred;
  sdp::Direction mDirection;
  // Will hold constraints from both fmtp and rid
  EncodingConstraints mConstraints;
};
+
class JsepAudioCodecDescription : public JsepCodecDescription {
 public:
  JsepAudioCodecDescription(const std::string& defaultPt,
                            const std::string& name,
                            uint32_t clock,
                            uint32_t channels,
                            uint32_t packetSize,
                            uint32_t bitRate,
                            bool enabled = true)
      : JsepCodecDescription(mozilla::SdpMediaSection::kAudio, defaultPt, name,
                             clock, channels, enabled),
        mPacketSize(packetSize),
        mBitrate(bitRate),
        mMaxPlaybackRate(0),
        mForceMono(false),
        mFECEnabled(false),
        mDtmfEnabled(false)
  {
  }

  JSEP_CODEC_CLONE(JsepAudioCodecDescription)

  // Returns the Opus fmtp parameters for |pt| in |msection|, or a
  // default-constructed set when no matching fmtp is present.
  SdpFmtpAttributeList::OpusParameters
  GetOpusParameters(const std::string& pt,
                    const SdpMediaSection& msection) const
  {
    // Will contain defaults if nothing else
    SdpFmtpAttributeList::OpusParameters result;
    auto* params = msection.FindFmtp(pt);

    if (params && params->codec_type == SdpRtpmapAttributeList::kOpus) {
      result =
        static_cast<const SdpFmtpAttributeList::OpusParameters&>(*params);
    }

    return result;
  }

  // Returns the telephone-event fmtp parameters for |pt| in |msection|,
  // or defaults when no matching fmtp is present.
  SdpFmtpAttributeList::TelephoneEventParameters
  GetTelephoneEventParameters(const std::string& pt,
                              const SdpMediaSection& msection) const
  {
    // Will contain defaults if nothing else
    SdpFmtpAttributeList::TelephoneEventParameters result;
    auto* params = msection.FindFmtp(pt);

    if (params && params->codec_type == SdpRtpmapAttributeList::kTelephoneEvent) {
      result =
        static_cast<const SdpFmtpAttributeList::TelephoneEventParameters&>
        (*params);
    }

    return result;
  }

  // Writes receive-direction fmtp parameters into |msection|. Send-direction
  // audio codecs add no parameters of their own.
  void
  AddParametersToMSection(SdpMediaSection& msection) const override
  {
    if (mDirection == sdp::kSend) {
      return;
    }

    if (mName == "opus") {
      SdpFmtpAttributeList::OpusParameters opusParams(
          GetOpusParameters(mDefaultPt, msection));
      if (mMaxPlaybackRate) {
        opusParams.maxplaybackrate = mMaxPlaybackRate;
      }
      if (mChannels == 2 && !mForceMono) {
        // We prefer to receive stereo, if available.
        opusParams.stereo = 1;
      }
      opusParams.useInBandFec = mFECEnabled ? 1 : 0;
      msection.SetFmtp(SdpFmtpAttributeList::Fmtp(mDefaultPt, opusParams));
    } else if (mName == "telephone-event") {
      // add the default dtmf tones
      SdpFmtpAttributeList::TelephoneEventParameters teParams(
          GetTelephoneEventParameters(mDefaultPt, msection));
      msection.SetFmtp(SdpFmtpAttributeList::Fmtp(mDefaultPt, teParams));
    }
  }

  // For a send-direction Opus codec, adopts the remote's declared
  // constraints (max playback rate, mono preference, FEC).
  bool
  Negotiate(const std::string& pt,
            const SdpMediaSection& remoteMsection) override
  {
    JsepCodecDescription::Negotiate(pt, remoteMsection);
    if (mName == "opus" && mDirection == sdp::kSend) {
      SdpFmtpAttributeList::OpusParameters opusParams(
          GetOpusParameters(mDefaultPt, remoteMsection));

      mMaxPlaybackRate = opusParams.maxplaybackrate;
      mForceMono = !opusParams.stereo;
      // draft-ietf-rtcweb-fec-03.txt section 4.2 says support for FEC
      // at the received side is declarative and can be negotiated
      // separately for either media direction.
      mFECEnabled = opusParams.useInBandFec;
    }

    return true;
  }

  uint32_t mPacketSize;
  uint32_t mBitrate;
  uint32_t mMaxPlaybackRate; // 0 means "no limit requested"
  bool mForceMono;
  bool mFECEnabled;
  bool mDtmfEnabled;
};
+
+class JsepVideoCodecDescription : public JsepCodecDescription {
+ public:
+ JsepVideoCodecDescription(const std::string& defaultPt,
+ const std::string& name,
+ uint32_t clock,
+ bool enabled = true)
+ : JsepCodecDescription(mozilla::SdpMediaSection::kVideo, defaultPt, name,
+ clock, 0, enabled),
+ mTmmbrEnabled(false),
+ mRembEnabled(false),
+ mFECEnabled(false),
+ mPacketizationMode(0)
+ {
+ // Add supported rtcp-fb types
+ mNackFbTypes.push_back("");
+ mNackFbTypes.push_back(SdpRtcpFbAttributeList::pli);
+ mCcmFbTypes.push_back(SdpRtcpFbAttributeList::fir);
+ }
+
+ virtual void
+ EnableTmmbr() {
+ // EnableTmmbr can be called multiple times due to multiple calls to
+ // PeerConnectionImpl::ConfigureJsepSessionCodecs
+ if (!mTmmbrEnabled) {
+ mTmmbrEnabled = true;
+ mCcmFbTypes.push_back(SdpRtcpFbAttributeList::tmmbr);
+ }
+ }
+
+ virtual void
+ EnableRemb() {
+ // EnableRemb can be called multiple times due to multiple calls to
+ // PeerConnectionImpl::ConfigureJsepSessionCodecs
+ if (!mRembEnabled) {
+ mRembEnabled = true;
+ mOtherFbTypes.push_back({ "", SdpRtcpFbAttributeList::kRemb, "", ""});
+ }
+ }
+
+ virtual void
+ EnableFec() {
+ // Enabling FEC for video works a little differently than enabling
+ // REMB or TMMBR. Support for FEC is indicated by the presence of
+ // particular codes (red and ulpfec) instead of using rtcpfb
+ // attributes on a given codec. There is no rtcpfb to push for FEC
+ // as can be seen above when REMB or TMMBR are enabled.
+ mFECEnabled = true;
+ }
+
  // Writes this codec's fmtp and rtcp-fb attributes into |msection|.
  void
  AddParametersToMSection(SdpMediaSection& msection) const override
  {
    AddFmtpsToMSection(msection);
    AddRtcpFbsToMSection(msection);
  }

  // Emits codec-specific fmtp lines (H264, red, VP8/VP9). Parameters are
  // split into send-only, recv-only, and shared sets per RFC 6184 semantics.
  void
  AddFmtpsToMSection(SdpMediaSection& msection) const
  {
    if (mName == "H264") {
      SdpFmtpAttributeList::H264Parameters h264Params(
          GetH264Parameters(mDefaultPt, msection));

      if (mDirection == sdp::kSend) {
        if (!h264Params.level_asymmetry_allowed) {
          // First time the fmtp has been set; set just in case this is for a
          // sendonly m-line, since even though we aren't receiving the level
          // negotiation still needs to happen (sigh).
          h264Params.profile_level_id = mProfileLevelId;
        }
      } else {
        // Parameters that only apply to what we receive
        h264Params.max_mbps = mConstraints.maxMbps;
        h264Params.max_fs = mConstraints.maxFs;
        h264Params.max_cpb = mConstraints.maxCpb;
        h264Params.max_dpb = mConstraints.maxDpb;
        h264Params.max_br = mConstraints.maxBr;
        // strncpy with size-1 guarantees NUL termination even on truncation.
        strncpy(h264Params.sprop_parameter_sets,
                mSpropParameterSets.c_str(),
                sizeof(h264Params.sprop_parameter_sets) - 1);
        h264Params.profile_level_id = mProfileLevelId;
      }

      // Parameters that apply to both the send and recv directions
      h264Params.packetization_mode = mPacketizationMode;
      // Hard-coded, may need to change someday?
      h264Params.level_asymmetry_allowed = true;

      msection.SetFmtp(SdpFmtpAttributeList::Fmtp(mDefaultPt, h264Params));
    } else if (mName == "red") {
      SdpFmtpAttributeList::RedParameters redParams(
          GetRedParameters(mDefaultPt, msection));
      redParams.encodings = mRedundantEncodings;
      msection.SetFmtp(SdpFmtpAttributeList::Fmtp(mDefaultPt, redParams));
    } else if (mName == "VP8" || mName == "VP9") {
      if (mDirection == sdp::kRecv) {
        // VP8 and VP9 share the same SDP parameters thus far
        SdpFmtpAttributeList::VP8Parameters vp8Params(
            GetVP8Parameters(mDefaultPt, msection));

        vp8Params.max_fs = mConstraints.maxFs;
        vp8Params.max_fr = mConstraints.maxFps;
        msection.SetFmtp(SdpFmtpAttributeList::Fmtp(mDefaultPt, vp8Params));
      }
    }
  }

  // Emits the ack/nack/ccm/other rtcp-fb entries for this codec's payload
  // type, unless an entry for that payload type already exists.
  void
  AddRtcpFbsToMSection(SdpMediaSection& msection) const
  {
    SdpRtcpFbAttributeList rtcpfbs(msection.GetRtcpFbs());
    for (const auto& rtcpfb : rtcpfbs.mFeedbacks) {
      if (rtcpfb.pt == mDefaultPt) {
        // Already set by the codec for the other direction.
        return;
      }
    }

    for (const std::string& type : mAckFbTypes) {
      rtcpfbs.PushEntry(mDefaultPt, SdpRtcpFbAttributeList::kAck, type);
    }
    for (const std::string& type : mNackFbTypes) {
      rtcpfbs.PushEntry(mDefaultPt, SdpRtcpFbAttributeList::kNack, type);
    }
    for (const std::string& type : mCcmFbTypes) {
      rtcpfbs.PushEntry(mDefaultPt, SdpRtcpFbAttributeList::kCcm, type);
    }
    for (const auto& fb : mOtherFbTypes) {
      rtcpfbs.PushEntry(mDefaultPt, fb.type, fb.parameter, fb.extra);
    }

    msection.SetRtcpFbs(rtcpfbs);
  }
+
  // Returns the H264 fmtp parameters for |pt| in |msection|, or a
  // default-constructed set when no matching fmtp is present.
  SdpFmtpAttributeList::H264Parameters
  GetH264Parameters(const std::string& pt,
                    const SdpMediaSection& msection) const
  {
    // Will contain defaults if nothing else
    SdpFmtpAttributeList::H264Parameters result;
    auto* params = msection.FindFmtp(pt);

    if (params && params->codec_type == SdpRtpmapAttributeList::kH264) {
      result =
        static_cast<const SdpFmtpAttributeList::H264Parameters&>(*params);
    }

    return result;
  }

  // Returns the red (redundant coding) fmtp parameters for |pt|, or defaults.
  SdpFmtpAttributeList::RedParameters
  GetRedParameters(const std::string& pt,
                   const SdpMediaSection& msection) const
  {
    SdpFmtpAttributeList::RedParameters result;
    auto* params = msection.FindFmtp(pt);

    if (params && params->codec_type == SdpRtpmapAttributeList::kRed) {
      result =
        static_cast<const SdpFmtpAttributeList::RedParameters&>(*params);
    }

    return result;
  }

  // Returns the VP8/VP9 fmtp parameters for |pt|, or defaults. The expected
  // codec type is chosen from mName, since both share a parameter layout.
  SdpFmtpAttributeList::VP8Parameters
  GetVP8Parameters(const std::string& pt,
                   const SdpMediaSection& msection) const
  {
    SdpRtpmapAttributeList::CodecType expectedType(
        mName == "VP8" ?
        SdpRtpmapAttributeList::kVP8 :
        SdpRtpmapAttributeList::kVP9);

    // Will contain defaults if nothing else
    SdpFmtpAttributeList::VP8Parameters result(expectedType);
    auto* params = msection.FindFmtp(pt);

    if (params && params->codec_type == expectedType) {
      result =
        static_cast<const SdpFmtpAttributeList::VP8Parameters&>(*params);
    }

    return result;
  }
+
  // Keeps only the rtcp-fb subtypes of |type| that the remote m-section
  // also advertises for our payload type.
  void
  NegotiateRtcpFb(const SdpMediaSection& remoteMsection,
                  SdpRtcpFbAttributeList::Type type,
                  std::vector<std::string>* supportedTypes)
  {
    std::vector<std::string> temp;
    for (auto& subType : *supportedTypes) {
      if (remoteMsection.HasRtcpFb(mDefaultPt, type, subType)) {
        temp.push_back(subType);
      }
    }
    *supportedTypes = temp;
  }

  // Same filtering, but for full Feedback entries (type + parameter).
  void
  NegotiateRtcpFb(const SdpMediaSection& remoteMsection,
                  std::vector<SdpRtcpFbAttributeList::Feedback>* supportedFbs) {
    std::vector<SdpRtcpFbAttributeList::Feedback> temp;
    for (auto& fb : *supportedFbs) {
      if (remoteMsection.HasRtcpFb(mDefaultPt, fb.type, fb.parameter)) {
        temp.push_back(fb);
      }
    }
    *supportedFbs = temp;
  }

  void
  NegotiateRtcpFb(const SdpMediaSection& remote)
  {
    // Removes rtcp-fb types that the other side doesn't support
    NegotiateRtcpFb(remote, SdpRtcpFbAttributeList::kAck, &mAckFbTypes);
    NegotiateRtcpFb(remote, SdpRtcpFbAttributeList::kNack, &mNackFbTypes);
    NegotiateRtcpFb(remote, SdpRtcpFbAttributeList::kCcm, &mCcmFbTypes);
    NegotiateRtcpFb(remote, &mOtherFbTypes);
  }
+
  // Adopts the remote's payload type and codec-specific parameters, then
  // prunes our rtcp-fb lists to what the remote supports.
  virtual bool
  Negotiate(const std::string& pt,
            const SdpMediaSection& remoteMsection) override
  {
    JsepCodecDescription::Negotiate(pt, remoteMsection);
    if (mName == "H264") {
      SdpFmtpAttributeList::H264Parameters h264Params(
          GetH264Parameters(mDefaultPt, remoteMsection));

      // Level is negotiated symmetrically if level asymmetry is disallowed
      if (!h264Params.level_asymmetry_allowed) {
        SetSaneH264Level(std::min(GetSaneH264Level(h264Params.profile_level_id),
                                  GetSaneH264Level(mProfileLevelId)),
                         &mProfileLevelId);
      }

      if (mDirection == sdp::kSend) {
        // Remote values of these apply only to the send codec.
        mConstraints.maxFs = h264Params.max_fs;
        mConstraints.maxMbps = h264Params.max_mbps;
        mConstraints.maxCpb = h264Params.max_cpb;
        mConstraints.maxDpb = h264Params.max_dpb;
        mConstraints.maxBr = h264Params.max_br;
        mSpropParameterSets = h264Params.sprop_parameter_sets;
        // Only do this if we didn't symmetrically negotiate above
        if (h264Params.level_asymmetry_allowed) {
          SetSaneH264Level(GetSaneH264Level(h264Params.profile_level_id),
                           &mProfileLevelId);
        }
      } else {
        // TODO(bug 1143709): max-recv-level support
      }
    } else if (mName == "red") {
      SdpFmtpAttributeList::RedParameters redParams(
          GetRedParameters(mDefaultPt, remoteMsection));
      mRedundantEncodings = redParams.encodings;
    } else if (mName == "VP8" || mName == "VP9") {
      if (mDirection == sdp::kSend) {
        SdpFmtpAttributeList::VP8Parameters vp8Params(
            GetVP8Parameters(mDefaultPt, remoteMsection));

        mConstraints.maxFs = vp8Params.max_fs;
        mConstraints.maxFps = vp8Params.max_fr;
      }
    }

    NegotiateRtcpFb(remoteMsection);
    return true;
  }
+
+ // Maps the not-so-sane encoding of H264 level into something that is
+ // ordered in the way one would expect
+ // 1b is 0xAB, everything else is the level left-shifted one half-byte
+ // (eg; 1.0 is 0xA0, 1.1 is 0xB0, 3.1 is 0x1F0)
+ static uint32_t
+ GetSaneH264Level(uint32_t profileLevelId)
+ {
+ uint32_t profileIdc = (profileLevelId >> 16);
+
+ if (profileIdc == 0x42 || profileIdc == 0x4D || profileIdc == 0x58) {
+ if ((profileLevelId & 0x10FF) == 0x100B) {
+ // Level 1b
+ return 0xAB;
+ }
+ }
+
+ uint32_t level = profileLevelId & 0xFF;
+
+ if (level == 0x09) {
+ // Another way to encode level 1b
+ return 0xAB;
+ }
+
+ return level << 4;
+ }
+
+ static void
+ SetSaneH264Level(uint32_t level, uint32_t* profileLevelId)
+ {
+ uint32_t profileIdc = (*profileLevelId >> 16);
+ uint32_t levelMask = 0xFF;
+
+ if (profileIdc == 0x42 || profileIdc == 0x4d || profileIdc == 0x58) {
+ levelMask = 0x10FF;
+ if (level == 0xAB) {
+ // Level 1b
+ level = 0x100B;
+ } else {
+ // Not 1b, just shift
+ level = level >> 4;
+ }
+ } else if (level == 0xAB) {
+ // Another way to encode 1b
+ level = 0x09;
+ } else {
+ // Not 1b, just shift
+ level = level >> 4;
+ }
+
+ *profileLevelId = (*profileLevelId & ~levelMask) | level;
+ }
+
+ enum Subprofile {
+ kH264ConstrainedBaseline,
+ kH264Baseline,
+ kH264Main,
+ kH264Extended,
+ kH264High,
+ kH264High10,
+ kH264High42,
+ kH264High44,
+ kH264High10I,
+ kH264High42I,
+ kH264High44I,
+ kH264CALVC44,
+ kH264UnknownSubprofile
+ };
+
+ static Subprofile
+ GetSubprofile(uint32_t profileLevelId)
+ {
+ // Based on Table 5 from RFC 6184:
+ // Profile profile_idc profile-iop
+ // (hexadecimal) (binary)
+
+ // CB 42 (B) x1xx0000
+ // same as: 4D (M) 1xxx0000
+ // same as: 58 (E) 11xx0000
+ // B 42 (B) x0xx0000
+ // same as: 58 (E) 10xx0000
+ // M 4D (M) 0x0x0000
+ // E 58 00xx0000
+ // H 64 00000000
+ // H10 6E 00000000
+ // H42 7A 00000000
+ // H44 F4 00000000
+ // H10I 6E 00010000
+ // H42I 7A 00010000
+ // H44I F4 00010000
+ // C44I 2C 00010000
+
+ if ((profileLevelId & 0xFF4F00) == 0x424000) {
+ // 01001111 (mask, 0x4F)
+ // x1xx0000 (from table)
+ // 01000000 (expected value, 0x40)
+ return kH264ConstrainedBaseline;
+ }
+
+ if ((profileLevelId & 0xFF8F00) == 0x4D8000) {
+ // 10001111 (mask, 0x8F)
+ // 1xxx0000 (from table)
+ // 10000000 (expected value, 0x80)
+ return kH264ConstrainedBaseline;
+ }
+
+ if ((profileLevelId & 0xFFCF00) == 0x58C000) {
+ // 11001111 (mask, 0xCF)
+ // 11xx0000 (from table)
+ // 11000000 (expected value, 0xC0)
+ return kH264ConstrainedBaseline;
+ }
+
+ if ((profileLevelId & 0xFF4F00) == 0x420000) {
+ // 01001111 (mask, 0x4F)
+ // x0xx0000 (from table)
+ // 00000000 (expected value)
+ return kH264Baseline;
+ }
+
+ if ((profileLevelId & 0xFFCF00) == 0x588000) {
+ // 11001111 (mask, 0xCF)
+ // 10xx0000 (from table)
+ // 10000000 (expected value, 0x80)
+ return kH264Baseline;
+ }
+
+ if ((profileLevelId & 0xFFAF00) == 0x4D0000) {
+ // 10101111 (mask, 0xAF)
+ // 0x0x0000 (from table)
+ // 00000000 (expected value)
+ return kH264Main;
+ }
+
+ if ((profileLevelId & 0xFF0000) == 0x580000) {
+ // 11001111 (mask, 0xCF)
+ // 00xx0000 (from table)
+ // 00000000 (expected value)
+ return kH264Extended;
+ }
+
+ if ((profileLevelId & 0xFFFF00) == 0x640000) {
+ return kH264High;
+ }
+
+ if ((profileLevelId & 0xFFFF00) == 0x6E0000) {
+ return kH264High10;
+ }
+
+ if ((profileLevelId & 0xFFFF00) == 0x7A0000) {
+ return kH264High42;
+ }
+
+ if ((profileLevelId & 0xFFFF00) == 0xF40000) {
+ return kH264High44;
+ }
+
+ if ((profileLevelId & 0xFFFF00) == 0x6E1000) {
+ return kH264High10I;
+ }
+
+ if ((profileLevelId & 0xFFFF00) == 0x7A1000) {
+ return kH264High42I;
+ }
+
+ if ((profileLevelId & 0xFFFF00) == 0xF41000) {
+ return kH264High44I;
+ }
+
+ if ((profileLevelId & 0xFFFF00) == 0x2C1000) {
+ return kH264CALVC44;
+ }
+
+ return kH264UnknownSubprofile;
+ }
+
  // For H264, formats only match when packetization mode and subprofile
  // agree; all other video codecs match unconditionally here.
  virtual bool
  ParametersMatch(const std::string& fmt,
                  const SdpMediaSection& remoteMsection) const override
  {
    if (mName == "H264") {
      SdpFmtpAttributeList::H264Parameters h264Params(
          GetH264Parameters(fmt, remoteMsection));

      if (h264Params.packetization_mode != mPacketizationMode) {
        return false;
      }

      if (GetSubprofile(h264Params.profile_level_id) !=
          GetSubprofile(mProfileLevelId)) {
        return false;
      }
    }

    return true;
  }
+
+ virtual bool
+ RtcpFbRembIsSet() const
+ {
+ for (const auto& fb : mOtherFbTypes) {
+ if (fb.type == SdpRtcpFbAttributeList::kRemb) {
+ return true;
+ }
+ }
+ return false;
+ }
+
  // Rebuilds mRedundantEncodings from every enabled non-red video codec in
  // |codecs|, recording each one's numeric payload type.
  virtual void
  UpdateRedundantEncodings(std::vector<JsepCodecDescription*> codecs)
  {
    for (const auto codec : codecs) {
      if (codec->mType == SdpMediaSection::kVideo &&
          codec->mEnabled &&
          codec->mName != "red") {
        uint8_t pt = (uint8_t)strtoul(codec->mDefaultPt.c_str(), nullptr, 10);
        // returns 0 if failed to convert, and since zero could
        // be valid, check the defaultPt for 0
        if (pt == 0 && codec->mDefaultPt != "0") {
          continue;
        }
        mRedundantEncodings.push_back(pt);
      }
    }
  }
+
  JSEP_CODEC_CLONE(JsepVideoCodecDescription)

  // Supported rtcp-fb subtypes, bucketed by feedback type; pruned during
  // negotiation to the remote's advertised set.
  std::vector<std::string> mAckFbTypes;
  std::vector<std::string> mNackFbTypes;
  std::vector<std::string> mCcmFbTypes;
  std::vector<SdpRtcpFbAttributeList::Feedback> mOtherFbTypes;
  bool mTmmbrEnabled;
  bool mRembEnabled;
  bool mFECEnabled;
  // Payload types of the codecs that red may carry (see
  // UpdateRedundantEncodings).
  std::vector<uint8_t> mRedundantEncodings;

  // H264-specific stuff
  uint32_t mProfileLevelId;
  uint32_t mPacketizationMode;
  std::string mSpropParameterSets;
};
+};
+
// Datachannel "codec": matched via sctpmap rather than rtpmap. Note that
// the base class reuses mChannels as the stream count here.
class JsepApplicationCodecDescription : public JsepCodecDescription {
 public:
  JsepApplicationCodecDescription(const std::string& defaultPt,
                                  const std::string& name,
                                  uint16_t channels,
                                  bool enabled = true)
      : JsepCodecDescription(mozilla::SdpMediaSection::kApplication, defaultPt,
                             name, 0, channels, enabled)
  {
  }

  JSEP_CODEC_CLONE(JsepApplicationCodecDescription)

  // Override, uses sctpmap instead of rtpmap
  virtual bool
  Matches(const std::string& fmt,
          const SdpMediaSection& remoteMsection) const override
  {
    if (mType != remoteMsection.GetMediaType()) {
      return false;
    }

    const SdpSctpmapAttributeList::Sctpmap* entry(
        remoteMsection.FindSctpmap(fmt));

    if (entry && !nsCRT::strcasecmp(mName.c_str(), entry->name.c_str())) {
      return true;
    }
    return false;
  }
};
+
+} // namespace mozilla
+
+#endif
diff --git a/media/webrtc/signaling/src/jsep/JsepSession.h b/media/webrtc/signaling/src/jsep/JsepSession.h
new file mode 100644
index 000000000..29bcbde05
--- /dev/null
+++ b/media/webrtc/signaling/src/jsep/JsepSession.h
@@ -0,0 +1,243 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _JSEPSESSION_H_
+#define _JSEPSESSION_H_
+
+#include <string>
+#include <vector>
+#include "mozilla/Maybe.h"
+#include "mozilla/RefPtr.h"
+#include "mozilla/UniquePtr.h"
+#include "nsError.h"
+
+#include "signaling/src/jsep/JsepTransport.h"
+#include "signaling/src/sdp/Sdp.h"
+
+#include "JsepTrack.h"
+
+namespace mozilla {
+
+// Forward declarations
+class JsepCodecDescription;
+class JsepTrack;
+
// JSEP signaling states; string names are produced by
// JsepSession::GetStateStr() and must stay in this order.
enum JsepSignalingState {
  kJsepStateStable,
  kJsepStateHaveLocalOffer,
  kJsepStateHaveRemoteOffer,
  kJsepStateHaveLocalPranswer,
  kJsepStateHaveRemotePranswer,
  kJsepStateClosed
};

// Kind of session description being set/created.
enum JsepSdpType {
  kJsepSdpOffer,
  kJsepSdpAnswer,
  kJsepSdpPranswer,
  kJsepSdpRollback
};

// Options shared by offer and answer creation.
struct JsepOAOptions {};
struct JsepOfferOptions : public JsepOAOptions {
  Maybe<size_t> mOfferToReceiveAudio;
  Maybe<size_t> mOfferToReceiveVideo;
  Maybe<bool> mDontOfferDataChannel;
  Maybe<bool> mIceRestart; // currently ignored by JsepSession
};
struct JsepAnswerOptions : public JsepOAOptions {};

enum JsepBundlePolicy {
  kBundleBalanced,
  kBundleMaxCompat,
  kBundleMaxBundle
};
+
// Abstract interface for a JSEP session (offer/answer state machine);
// implemented by JsepSessionImpl.
class JsepSession
{
public:
  explicit JsepSession(const std::string& name)
      : mName(name), mState(kJsepStateStable), mNegotiations(0)
  {
  }
  virtual ~JsepSession() {}

  virtual nsresult Init() = 0;

  // Accessors for basic properties.
  virtual const std::string&
  GetName() const
  {
    return mName;
  }
  virtual JsepSignalingState
  GetState() const
  {
    return mState;
  }
  virtual uint32_t
  GetNegotiations() const
  {
    return mNegotiations;
  }

  // Set up the ICE And DTLS data.
  virtual nsresult SetIceCredentials(const std::string& ufrag,
                                     const std::string& pwd) = 0;
  virtual const std::string& GetUfrag() const = 0;
  virtual const std::string& GetPwd() const = 0;
  virtual nsresult SetBundlePolicy(JsepBundlePolicy policy) = 0;
  virtual bool RemoteIsIceLite() const = 0;
  virtual bool RemoteIceIsRestarting() const = 0;
  virtual std::vector<std::string> GetIceOptions() const = 0;

  virtual nsresult AddDtlsFingerprint(const std::string& algorithm,
                                      const std::vector<uint8_t>& value) = 0;

  virtual nsresult AddAudioRtpExtension(const std::string& extensionName,
                                        SdpDirectionAttribute::Direction direction) = 0;
  virtual nsresult AddVideoRtpExtension(const std::string& extensionName,
                                        SdpDirectionAttribute::Direction direction) = 0;

  // Kinda gross to be locking down the data structure type like this, but
  // returning by value is problematic due to the lack of stl move semantics in
  // our build config, since we can't use UniquePtr in the container. The
  // alternative is writing a raft of accessor functions that allow arbitrary
  // manipulation (which will be unwieldy), or allowing functors to be injected
  // that manipulate the data structure (still pretty unwieldy).
  virtual std::vector<JsepCodecDescription*>& Codecs() = 0;

  // Applies |function| to every session-level codec and to every codec on
  // every local and remote track.
  template <class UnaryFunction>
  void ForEachCodec(UnaryFunction& function)
  {
    std::for_each(Codecs().begin(), Codecs().end(), function);
    for (RefPtr<JsepTrack>& track : GetLocalTracks()) {
      track->ForEachCodec(function);
    }
    for (RefPtr<JsepTrack>& track : GetRemoteTracks()) {
      track->ForEachCodec(function);
    }
  }

  // Stable-sorts the session-level codecs and each track's codecs with
  // |sorter|.
  template <class BinaryPredicate>
  void SortCodecs(BinaryPredicate& sorter)
  {
    std::stable_sort(Codecs().begin(), Codecs().end(), sorter);
    for (RefPtr<JsepTrack>& track : GetLocalTracks()) {
      track->SortCodecs(sorter);
    }
    for (RefPtr<JsepTrack>& track : GetRemoteTracks()) {
      track->SortCodecs(sorter);
    }
  }

  // Manage tracks. We take shared ownership of any track.
  virtual nsresult AddTrack(const RefPtr<JsepTrack>& track) = 0;
  virtual nsresult RemoveTrack(const std::string& streamId,
                               const std::string& trackId) = 0;
  virtual nsresult ReplaceTrack(const std::string& oldStreamId,
                                const std::string& oldTrackId,
                                const std::string& newStreamId,
                                const std::string& newTrackId) = 0;
  virtual nsresult SetParameters(
      const std::string& streamId,
      const std::string& trackId,
      const std::vector<JsepTrack::JsConstraints>& constraints) = 0;

  virtual nsresult GetParameters(
      const std::string& streamId,
      const std::string& trackId,
      std::vector<JsepTrack::JsConstraints>* outConstraints) = 0;

  virtual std::vector<RefPtr<JsepTrack>> GetLocalTracks() const = 0;

  virtual std::vector<RefPtr<JsepTrack>> GetRemoteTracks() const = 0;

  virtual std::vector<RefPtr<JsepTrack>> GetRemoteTracksAdded() const = 0;

  virtual std::vector<RefPtr<JsepTrack>> GetRemoteTracksRemoved() const = 0;

  // Access the negotiated track pairs.
  virtual std::vector<JsepTrackPair> GetNegotiatedTrackPairs() const = 0;

  // Access transports.
  virtual std::vector<RefPtr<JsepTransport>> GetTransports() const = 0;

  // Basic JSEP operations.
  virtual nsresult CreateOffer(const JsepOfferOptions& options,
                               std::string* offer) = 0;
  virtual nsresult CreateAnswer(const JsepAnswerOptions& options,
                                std::string* answer) = 0;
  virtual std::string GetLocalDescription() const = 0;
  virtual std::string GetRemoteDescription() const = 0;
  virtual nsresult SetLocalDescription(JsepSdpType type,
                                       const std::string& sdp) = 0;
  virtual nsresult SetRemoteDescription(JsepSdpType type,
                                        const std::string& sdp) = 0;
  virtual nsresult AddRemoteIceCandidate(const std::string& candidate,
                                         const std::string& mid,
                                         uint16_t level) = 0;
  virtual nsresult AddLocalIceCandidate(const std::string& candidate,
                                        uint16_t level,
                                        std::string* mid,
                                        bool* skipped) = 0;
  virtual nsresult UpdateDefaultCandidate(
      const std::string& defaultCandidateAddr,
      uint16_t defaultCandidatePort,
      const std::string& defaultRtcpCandidateAddr,
      uint16_t defaultRtcpCandidatePort,
      uint16_t level) = 0;
  virtual nsresult EndOfLocalCandidates(uint16_t level) = 0;
  virtual nsresult Close() = 0;

  // ICE controlling or controlled
  virtual bool IsIceControlling() const = 0;

  // Base implementation returns a generic placeholder string.
  virtual const std::string
  GetLastError() const
  {
    return "Error";
  }

  // Maps a JsepSignalingState to its spec-style name; the array order must
  // mirror the enum declaration order.
  static const char*
  GetStateStr(JsepSignalingState state)
  {
    static const char* states[] = { "stable", "have-local-offer",
                                    "have-remote-offer", "have-local-pranswer",
                                    "have-remote-pranswer", "closed" };

    return states[state];
  }

  virtual bool AllLocalTracksAreAssigned() const = 0;

  // Tallies negotiated sending/receiving tracks into per-media-type counts.
  void
  CountTracks(uint16_t (&receiving)[SdpMediaSection::kMediaTypes],
              uint16_t (&sending)[SdpMediaSection::kMediaTypes]) const
  {
    auto trackPairs = GetNegotiatedTrackPairs();

    memset(receiving, 0, sizeof(receiving));
    memset(sending, 0, sizeof(sending));

    for (auto& pair : trackPairs) {
      if (pair.mReceiving) {
        receiving[pair.mReceiving->GetMediaType()]++;
      }

      if (pair.mSending) {
        sending[pair.mSending->GetMediaType()]++;
      }
    }
  }

protected:
  const std::string mName;
  JsepSignalingState mState;
  uint32_t mNegotiations;
};
+
+} // namespace mozilla
+
+#endif
diff --git a/media/webrtc/signaling/src/jsep/JsepSessionImpl.cpp b/media/webrtc/signaling/src/jsep/JsepSessionImpl.cpp
new file mode 100644
index 000000000..f5015dda2
--- /dev/null
+++ b/media/webrtc/signaling/src/jsep/JsepSessionImpl.cpp
@@ -0,0 +1,2497 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "logging.h"
+
+#include "signaling/src/jsep/JsepSessionImpl.h"
+#include <string>
+#include <set>
+#include <bitset>
+#include <stdlib.h>
+
+#include "nspr.h"
+#include "nss.h"
+#include "pk11pub.h"
+#include "nsDebug.h"
+
+#include <mozilla/Move.h>
+#include <mozilla/UniquePtr.h>
+
+#include "signaling/src/jsep/JsepTrack.h"
+#include "signaling/src/jsep/JsepTrack.h"
+#include "signaling/src/jsep/JsepTransport.h"
+#include "signaling/src/sdp/Sdp.h"
+#include "signaling/src/sdp/SipccSdp.h"
+#include "signaling/src/sdp/SipccSdpParser.h"
+#include "mozilla/net/DataChannelProtocol.h"
+
+namespace mozilla {
+
+MOZ_MTLOG_MODULE("jsep")
+
// Records a streamed error message in mLastError and logs it.
// Wrapped in do { } while (0) WITHOUT a trailing semicolon so the macro
// expands to exactly one statement: callers already supply their own ';',
// and the original trailing ';' produced an empty extra statement, which
// breaks unbraced if/else usage.
#define JSEP_SET_ERROR(error)                                                  \
  do {                                                                         \
    std::ostringstream os;                                                     \
    os << error;                                                               \
    mLastError = os.str();                                                     \
    MOZ_MTLOG(ML_ERROR, mLastError);                                           \
  } while (0)
+
// Builds the set of RTP payload types we refuse to use in SDP:
// 1, 2, 19, and the 64-95 range. (Presumably these collide with RTCP
// packet types / reserved assignments — confirm against RFC 5761.)
static std::bitset<128> GetForbiddenSdpPayloadTypes() {
  std::bitset<128> forbidden;
  forbidden.set(1);
  forbidden.set(2);
  forbidden.set(19);
  for (uint16_t pt = 64; pt < 96; ++pt) {
    forbidden.set(pt);
  }
  return forbidden;
}
+
// One-time session initialization: generates the session ids (SetupIds)
// and installs the default codec and RTP header-extension tables.
// Must be called exactly once, before any other use of the session.
nsresult
JsepSessionImpl::Init()
{
  mLastError.clear();

  // SetupIds() assigns mSessionId; a non-zero id means we already ran.
  MOZ_ASSERT(!mSessionId, "Init called more than once");

  nsresult rv = SetupIds();
  NS_ENSURE_SUCCESS(rv, rv);

  SetupDefaultCodecs();
  SetupDefaultRtpExtensions();

  return NS_OK;
}
+
// Helper: returns an iterator to the track assigned to m-section |level|,
// or tracks.end() if no track has that assignment.
template <class T>
typename std::vector<T>::iterator
FindTrackByLevel(std::vector<T>& tracks, size_t level)
{
  auto it = tracks.begin();
  for (; it != tracks.end(); ++it) {
    if (it->mAssignedMLine.isSome() && *it->mAssignedMLine == level) {
      break;
    }
  }
  return it;
}
+
// Helper: returns an iterator to the track whose stream AND track ids both
// match, or tracks.end() if none does.
template <class T>
typename std::vector<T>::iterator
FindTrackByIds(std::vector<T>& tracks,
               const std::string& streamId,
               const std::string& trackId)
{
  auto it = tracks.begin();
  while (it != tracks.end()) {
    if (it->mTrack->GetStreamId() == streamId &&
        it->mTrack->GetTrackId() == trackId) {
      break;
    }
    ++it;
  }
  return it;
}
+
+template <class T>
+typename std::vector<T>::iterator
+FindUnassignedTrackByType(std::vector<T>& tracks,
+ SdpMediaSection::MediaType type)
+{
+ for (auto t = tracks.begin(); t != tracks.end(); ++t) {
+ if (!t->mAssignedMLine.isSome() &&
+ (t->mTrack->GetMediaType() == type)) {
+ return t;
+ }
+ }
+
+ return tracks.end();
+}
+
// Registers a local (sending) track with the session so it can be bound to
// an m-section during the next offer/answer. Non-datachannel tracks are
// stamped with the session CNAME and, if they have none yet, a freshly
// generated SSRC.
nsresult
JsepSessionImpl::AddTrack(const RefPtr<JsepTrack>& track)
{
  mLastError.clear();
  // Only sending tracks are added through this API.
  MOZ_ASSERT(track->GetDirection() == sdp::kSend);

  if (track->GetMediaType() != SdpMediaSection::kApplication) {
    track->SetCNAME(mCNAME);

    if (track->GetSsrcs().empty()) {
      uint32_t ssrc;
      nsresult rv = CreateSsrc(&ssrc);
      NS_ENSURE_SUCCESS(rv, rv);
      track->AddSsrc(ssrc);
    }
  }

  // Give the track the full set of codecs we support; negotiation narrows
  // this down later.
  track->PopulateCodecs(mSupportedCodecs.values);

  JsepSendingTrack strack;
  strack.mTrack = track;

  mLocalTracks.push_back(strack);

  return NS_OK;
}
+
+nsresult
+JsepSessionImpl::RemoveTrack(const std::string& streamId,
+ const std::string& trackId)
+{
+ if (mState != kJsepStateStable) {
+ JSEP_SET_ERROR("Removing tracks outside of stable is unsupported.");
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ auto track = FindTrackByIds(mLocalTracks, streamId, trackId);
+
+ if (track == mLocalTracks.end()) {
+ return NS_ERROR_INVALID_ARG;
+ }
+
+ mLocalTracks.erase(track);
+ return NS_OK;
+}
+
// Stores the local ICE ufrag/password for use when generating SDP.
nsresult
JsepSessionImpl::SetIceCredentials(const std::string& ufrag,
                                   const std::string& pwd)
{
  mLastError.clear();
  mIceUfrag = ufrag;
  mIcePwd = pwd;

  return NS_OK;
}
+
// Sets the bundle policy consulted by SetupBundle. Only permitted before
// the first local description has been applied.
nsresult
JsepSessionImpl::SetBundlePolicy(JsepBundlePolicy policy)
{
  mLastError.clear();
  if (mCurrentLocalDescription) {
    JSEP_SET_ERROR("Changing the bundle policy is only supported before the "
                   "first SetLocalDescription.");
    return NS_ERROR_UNEXPECTED;
  }

  mBundlePolicy = policy;
  return NS_OK;
}
+
// Records a DTLS certificate fingerprint (hash algorithm name + digest
// bytes) in mDtlsFingerprints for inclusion in generated SDP.
nsresult
JsepSessionImpl::AddDtlsFingerprint(const std::string& algorithm,
                                    const std::vector<uint8_t>& value)
{
  mLastError.clear();
  JsepDtlsFingerprint fp;

  fp.mAlgorithm = algorithm;
  fp.mValue = value;

  mDtlsFingerprints.push_back(fp);

  return NS_OK;
}
+
// Appends an RTP header extension to |extensions|, assigning it the next
// 1-based extmap id (current entry count + 1). Fails once the id would
// exceed UINT16_MAX.
nsresult
JsepSessionImpl::AddRtpExtension(std::vector<SdpExtmapAttributeList::Extmap>& extensions,
                                 const std::string& extensionName,
                                 SdpDirectionAttribute::Direction direction)
{
  mLastError.clear();

  if (extensions.size() + 1 > UINT16_MAX) {
    JSEP_SET_ERROR("Too many rtp extensions have been added");
    return NS_ERROR_FAILURE;
  }

  SdpExtmapAttributeList::Extmap extmap =
    { static_cast<uint16_t>(extensions.size() + 1), // next free extmap id
      direction,
      // Only mark the direction explicit when it differs from the default.
      direction != SdpDirectionAttribute::kSendrecv, // do we want to specify direction?
      extensionName,
      "" }; // no extension parameters

  extensions.push_back(extmap);
  return NS_OK;
}
+
// Registers an RTP header extension for audio m-sections; see
// AddRtpExtension for id assignment and failure modes.
nsresult
JsepSessionImpl::AddAudioRtpExtension(const std::string& extensionName,
                                      SdpDirectionAttribute::Direction direction)
{
  return AddRtpExtension(mAudioRtpExtensions, extensionName, direction);
}
+
// Registers an RTP header extension for video m-sections; see
// AddRtpExtension for id assignment and failure modes.
nsresult
JsepSessionImpl::AddVideoRtpExtension(const std::string& extensionName,
                                      SdpDirectionAttribute::Direction direction)
{
  return AddRtpExtension(mVideoRtpExtensions, extensionName, direction);
}
+
+template<class T>
+std::vector<RefPtr<JsepTrack>>
+GetTracks(const std::vector<T>& wrappedTracks)
+{
+ std::vector<RefPtr<JsepTrack>> result;
+ for (auto i = wrappedTracks.begin(); i != wrappedTracks.end(); ++i) {
+ result.push_back(i->mTrack);
+ }
+ return result;
+}
+
// Re-identifies a local track: it keeps any m-section assignment but takes
// on the new stream/track ids. Fails if the old ids are unknown or the new
// ids already belong to another local track.
nsresult
JsepSessionImpl::ReplaceTrack(const std::string& oldStreamId,
                              const std::string& oldTrackId,
                              const std::string& newStreamId,
                              const std::string& newTrackId)
{
  auto it = FindTrackByIds(mLocalTracks, oldStreamId, oldTrackId);

  if (it == mLocalTracks.end()) {
    JSEP_SET_ERROR("Track " << oldStreamId << "/" << oldTrackId
                   << " was never added.");
    return NS_ERROR_INVALID_ARG;
  }

  if (FindTrackByIds(mLocalTracks, newStreamId, newTrackId) !=
      mLocalTracks.end()) {
    JSEP_SET_ERROR("Track " << newStreamId << "/" << newTrackId
                   << " was already added.");
    return NS_ERROR_INVALID_ARG;
  }

  it->mTrack->SetStreamId(newStreamId);
  it->mTrack->SetTrackId(newTrackId);

  return NS_OK;
}
+
+nsresult
+JsepSessionImpl::SetParameters(const std::string& streamId,
+ const std::string& trackId,
+ const std::vector<JsepTrack::JsConstraints>& constraints)
+{
+ auto it = FindTrackByIds(mLocalTracks, streamId, trackId);
+
+ if (it == mLocalTracks.end()) {
+ JSEP_SET_ERROR("Track " << streamId << "/" << trackId << " was never added.");
+ return NS_ERROR_INVALID_ARG;
+ }
+
+ // Add RtpStreamId Extmap
+ // SdpDirectionAttribute::Direction is a bitmask
+ SdpDirectionAttribute::Direction addVideoExt = SdpDirectionAttribute::kInactive;
+ for (auto constraintEntry: constraints) {
+ if (constraintEntry.rid != "") {
+ if (it->mTrack->GetMediaType() == SdpMediaSection::kVideo) {
+ addVideoExt = static_cast<SdpDirectionAttribute::Direction>(addVideoExt
+ | it->mTrack->GetDirection());
+ }
+ }
+ }
+ if (addVideoExt != SdpDirectionAttribute::kInactive) {
+ AddVideoRtpExtension("urn:ietf:params:rtp-hdrext:sdes:rtp-stream-id", addVideoExt);
+ }
+
+ it->mTrack->SetJsConstraints(constraints);
+ return NS_OK;
+}
+
// Copies the JS-level encoding constraints of the given local track into
// |outConstraints|. Fails if the track is unknown.
nsresult
JsepSessionImpl::GetParameters(const std::string& streamId,
                               const std::string& trackId,
                               std::vector<JsepTrack::JsConstraints>* outConstraints)
{
  auto it = FindTrackByIds(mLocalTracks, streamId, trackId);

  if (it == mLocalTracks.end()) {
    JSEP_SET_ERROR("Track " << streamId << "/" << trackId << " was never added.");
    return NS_ERROR_INVALID_ARG;
  }

  it->mTrack->GetJsConstraints(outConstraints);
  return NS_OK;
}
+
// All local (sending) tracks added to the session.
std::vector<RefPtr<JsepTrack>>
JsepSessionImpl::GetLocalTracks() const
{
  return GetTracks(mLocalTracks);
}

// All remote (receiving) tracks currently known to the session.
std::vector<RefPtr<JsepTrack>>
JsepSessionImpl::GetRemoteTracks() const
{
  return GetTracks(mRemoteTracks);
}

// Remote tracks added by the most recent SetRemoteDescription (the list is
// cleared at the start of each SetRemoteDescription).
std::vector<RefPtr<JsepTrack>>
JsepSessionImpl::GetRemoteTracksAdded() const
{
  return GetTracks(mRemoteTracksAdded);
}

// Remote tracks removed by the most recent SetRemoteDescription.
std::vector<RefPtr<JsepTrack>>
JsepSessionImpl::GetRemoteTracksRemoved() const
{
  return GetTracks(mRemoteTracksRemoved);
}
+
// Populates the offer's m-sections: audio first, then video, then (unless
// suppressed via mDontOfferDataChannel) datachannel. Fails if the result
// would contain no m-sections at all.
nsresult
JsepSessionImpl::SetupOfferMSections(const JsepOfferOptions& options, Sdp* sdp)
{
  // First audio, then video, then datachannel, for interop
  // TODO(bug 1121756): We need to group these by stream-id, _then_ by media
  // type, according to the spec. However, this is not going to interop with
  // older versions of Firefox if a video-only stream is added before an
  // audio-only stream.
  // We should probably wait until 38 is ESR before trying to do this.
  nsresult rv = SetupOfferMSectionsByType(
      SdpMediaSection::kAudio, options.mOfferToReceiveAudio, sdp);

  NS_ENSURE_SUCCESS(rv, rv);

  rv = SetupOfferMSectionsByType(
      SdpMediaSection::kVideo, options.mOfferToReceiveVideo, sdp);

  NS_ENSURE_SUCCESS(rv, rv);

  if (!(options.mDontOfferDataChannel.isSome() &&
        *options.mDontOfferDataChannel)) {
    // No offerToReceive count applies to datachannel.
    rv = SetupOfferMSectionsByType(
        SdpMediaSection::kApplication, Maybe<size_t>(), sdp);

    NS_ENSURE_SUCCESS(rv, rv);
  }

  if (!sdp->GetMediaSectionCount()) {
    JSEP_SET_ERROR("Cannot create an offer with no local tracks, "
                   "no offerToReceiveAudio/Video, and no DataChannel.");
    return NS_ERROR_INVALID_ARG;
  }

  return NS_OK;
}
+
// For one media type: ensure every local track has an m-section, re-enable
// recv on m-sections that previously carried a remote track, honor any
// remaining offerToReceive count, and disable m-sections left unused.
nsresult
JsepSessionImpl::SetupOfferMSectionsByType(SdpMediaSection::MediaType mediatype,
                                           Maybe<size_t> offerToReceiveMaybe,
                                           Sdp* sdp)
{
  // Convert the Maybe into a size_t*, since that is more readable, especially
  // when using it as an in/out param.
  size_t offerToReceiveCount;
  size_t* offerToReceiveCountPtr = nullptr;

  if (offerToReceiveMaybe) {
    offerToReceiveCount = *offerToReceiveMaybe;
    offerToReceiveCountPtr = &offerToReceiveCount;
  }

  // Make sure every local track has an m-section
  nsresult rv = BindLocalTracks(mediatype, sdp);
  NS_ENSURE_SUCCESS(rv, rv);

  // Make sure that m-sections that previously had a remote track have the
  // recv bit set. Only matters for renegotiation.
  rv = BindRemoteTracks(mediatype, sdp, offerToReceiveCountPtr);
  NS_ENSURE_SUCCESS(rv, rv);

  // If we need more recv sections, start setting the recv bit on other
  // msections. If not, disable msections that have no tracks.
  rv = SetRecvAsNeededOrDisable(mediatype,
                                sdp,
                                offerToReceiveCountPtr);
  NS_ENSURE_SUCCESS(rv, rv);

  // If we still don't have enough recv m-sections, add some.
  if (offerToReceiveCountPtr && *offerToReceiveCountPtr) {
    rv = AddRecvonlyMsections(mediatype, *offerToReceiveCountPtr, sdp);
    NS_ENSURE_SUCCESS(rv, rv);
  }

  return NS_OK;
}
+
// Gives every local track of |mediatype| an m-section in |sdp| — reusing a
// prior assignment if one exists, otherwise claiming (or creating) a free
// m-section — and adds the track to the offer.
nsresult
JsepSessionImpl::BindLocalTracks(SdpMediaSection::MediaType mediatype, Sdp* sdp)
{
  for (JsepSendingTrack& track : mLocalTracks) {
    if (mediatype != track.mTrack->GetMediaType()) {
      continue;
    }

    SdpMediaSection* msection;
    if (track.mAssignedMLine.isSome()) {
      // Previously negotiated: keep the same level.
      msection = &sdp->GetMediaSection(*track.mAssignedMLine);
    } else {
      nsresult rv = GetFreeMsectionForSend(track.mTrack->GetMediaType(),
                                           sdp,
                                           &msection);
      NS_ENSURE_SUCCESS(rv, rv);
      track.mAssignedMLine = Some(msection->GetLevel());
    }

    track.mTrack->AddToOffer(msection);
  }
  return NS_OK;
}
+
// Re-adds previously negotiated remote tracks of |mediatype| to their
// assigned m-sections (so the recv direction survives renegotiation),
// consuming one unit of |offerToReceive| per track when a count was given.
nsresult
JsepSessionImpl::BindRemoteTracks(SdpMediaSection::MediaType mediatype,
                                  Sdp* sdp,
                                  size_t* offerToReceive)
{
  for (JsepReceivingTrack& track : mRemoteTracks) {
    if (mediatype != track.mTrack->GetMediaType()) {
      continue;
    }

    // Remote tracks always get an m-line assignment when created; a missing
    // one indicates an internal logic error.
    if (!track.mAssignedMLine.isSome()) {
      MOZ_ASSERT(false);
      continue;
    }

    auto& msection = sdp->GetMediaSection(*track.mAssignedMLine);

    if (mSdpHelper.MsectionIsDisabled(msection)) {
      // TODO(bug 1095226) Content probably disabled this? Should we allow
      // content to do this?
      continue;
    }

    track.mTrack->AddToOffer(&msection);

    if (offerToReceive && *offerToReceive) {
      --(*offerToReceive);
    }
  }

  return NS_OK;
}
+
// Second pass over the offer's enabled m-sections of |mediatype| that are
// not yet receiving: turn on recv while |offerToRecv| still demands more
// (or, with no explicit count, on every sending section), and disable
// sections that end up neither sending nor receiving.
nsresult
JsepSessionImpl::SetRecvAsNeededOrDisable(SdpMediaSection::MediaType mediatype,
                                          Sdp* sdp,
                                          size_t* offerToRecv)
{
  for (size_t i = 0; i < sdp->GetMediaSectionCount(); ++i) {
    auto& msection = sdp->GetMediaSection(i);

    if (mSdpHelper.MsectionIsDisabled(msection) ||
        msection.GetMediaType() != mediatype ||
        msection.IsReceiving()) {
      continue;
    }

    if (offerToRecv) {
      if (*offerToRecv) {
        // An explicit count was requested and not yet satisfied.
        SetupOfferToReceiveMsection(&msection);
        --(*offerToRecv);
        continue;
      }
    } else if (msection.IsSending()) {
      // No explicit count: default sending sections to sendrecv.
      SetupOfferToReceiveMsection(&msection);
      continue;
    }

    if (!msection.IsSending()) {
      // Unused m-section, and no reason to offer to recv on it
      mSdpHelper.DisableMsection(sdp, &msection);
    }
  }

  return NS_OK;
}
+
// Makes |offer| recv-capable even though no real local track backs it.
void
JsepSessionImpl::SetupOfferToReceiveMsection(SdpMediaSection* offer)
{
  // Create a dummy recv track, and have it add codecs, set direction, etc.
  RefPtr<JsepTrack> dummy = new JsepTrack(offer->GetMediaType(),
                                          "",
                                          "",
                                          sdp::kRecv);
  dummy->PopulateCodecs(mSupportedCodecs.values);
  dummy->AddToOffer(offer);
}
+
// Appends |count| brand-new recvonly m-sections of |mediatype| to |sdp|,
// each configured via SetupOfferToReceiveMsection.
nsresult
JsepSessionImpl::AddRecvonlyMsections(SdpMediaSection::MediaType mediatype,
                                      size_t count,
                                      Sdp* sdp)
{
  while (count--) {
    nsresult rv = CreateOfferMSection(
        mediatype,
        mSdpHelper.GetProtocolForMediaType(mediatype),
        SdpDirectionAttribute::kRecvonly,
        sdp);

    NS_ENSURE_SUCCESS(rv, rv);
    // The new section is always the last one.
    SetupOfferToReceiveMsection(
        &sdp->GetMediaSection(sdp->GetMediaSectionCount() - 1));
  }
  return NS_OK;
}
+
// This function creates a skeleton SDP based on the old descriptions
// (ie; all m-sections are inactive).
// |oldLocalSdp| supplies each m-section's type/protocol; |oldAnswer|
// supplies the sticky params copied onto the matching new section.
nsresult
JsepSessionImpl::AddReofferMsections(const Sdp& oldLocalSdp,
                                     const Sdp& oldAnswer,
                                     Sdp* newSdp)
{
  nsresult rv;

  for (size_t i = 0; i < oldLocalSdp.GetMediaSectionCount(); ++i) {
    // We do not set the direction in this function (or disable when previously
    // disabled), that happens in |SetupOfferMSectionsByType|
    rv = CreateOfferMSection(oldLocalSdp.GetMediaSection(i).GetMediaType(),
                             oldLocalSdp.GetMediaSection(i).GetProtocol(),
                             SdpDirectionAttribute::kInactive,
                             newSdp);
    NS_ENSURE_SUCCESS(rv, rv);

    rv = mSdpHelper.CopyStickyParams(oldAnswer.GetMediaSection(i),
                                     &newSdp->GetMediaSection(i));
    NS_ENSURE_SUCCESS(rv, rv);
  }

  return NS_OK;
}
+
// Adds a BUNDLE group covering every m-section that has a mid, tagging
// sections with a=bundle-only according to mBundlePolicy. No group
// attribute is emitted when fewer than two mids exist.
void
JsepSessionImpl::SetupBundle(Sdp* sdp) const
{
  std::vector<std::string> mids;
  std::set<SdpMediaSection::MediaType> observedTypes;

  // This has the effect of changing the bundle level if the first m-section
  // goes from disabled to enabled. This is kinda inefficient.

  for (size_t i = 0; i < sdp->GetMediaSectionCount(); ++i) {
    auto& attrs = sdp->GetMediaSection(i).GetAttributeList();
    if (attrs.HasAttribute(SdpAttribute::kMidAttribute)) {
      bool useBundleOnly = false;
      switch (mBundlePolicy) {
        case kBundleMaxCompat:
          // We don't use bundle-only for max-compat
          break;
        case kBundleBalanced:
          // balanced means we use bundle-only on everything but the first
          // m-section of a given type
          if (observedTypes.count(sdp->GetMediaSection(i).GetMediaType())) {
            useBundleOnly = true;
          }
          observedTypes.insert(sdp->GetMediaSection(i).GetMediaType());
          break;
        case kBundleMaxBundle:
          // max-bundle means we use bundle-only on everything but the first
          // m-section
          useBundleOnly = !mids.empty();
          break;
      }

      if (useBundleOnly) {
        attrs.SetAttribute(
            new SdpFlagAttribute(SdpAttribute::kBundleOnlyAttribute));
      }

      mids.push_back(attrs.GetMid());
    }
  }

  if (mids.size() > 1) {
    UniquePtr<SdpGroupAttributeList> groupAttr(new SdpGroupAttributeList);
    groupAttr->PushEntry(SdpGroupAttributeList::kBundle, mids);
    sdp->GetAttributeList().SetAttribute(groupAttr.release());
  }
}
+
// Obtains the remote stream/track ids for |msection|. When the remote SDP
// carries no usable msid (NS_ERROR_NOT_AVAILABLE), falls back to the
// default stream id and a per-level generated UUID track id, cached so the
// same level keeps the same id. A successful msid lookup clears any cached
// default for that level so it cannot resurface later.
nsresult
JsepSessionImpl::GetRemoteIds(const Sdp& sdp,
                              const SdpMediaSection& msection,
                              std::string* streamId,
                              std::string* trackId)
{
  nsresult rv = mSdpHelper.GetIdsFromMsid(sdp, msection, streamId, trackId);
  if (rv == NS_ERROR_NOT_AVAILABLE) {
    *streamId = mDefaultRemoteStreamId;

    if (!mDefaultRemoteTrackIdsByLevel.count(msection.GetLevel())) {
      // Generate random track ids.
      if (!mUuidGen->Generate(trackId)) {
        JSEP_SET_ERROR("Failed to generate UUID for JsepTrack");
        return NS_ERROR_FAILURE;
      }

      mDefaultRemoteTrackIdsByLevel[msection.GetLevel()] = *trackId;
    } else {
      *trackId = mDefaultRemoteTrackIdsByLevel[msection.GetLevel()];
    }
    return NS_OK;
  }

  if (NS_SUCCEEDED(rv)) {
    // If, for whatever reason, the other end renegotiates with an msid where
    // there wasn't one before, don't allow the old default to pop up again
    // later.
    mDefaultRemoteTrackIdsByLevel.erase(msection.GetLevel());
  }

  return rv;
}
+
// Generates an SDP offer from the current local tracks and |options|.
// Only allowed in the stable signaling state. The parsed result is stored
// in mGeneratedLocalDescription and the session version is bumped.
nsresult
JsepSessionImpl::CreateOffer(const JsepOfferOptions& options,
                             std::string* offer)
{
  mLastError.clear();

  if (mState != kJsepStateStable) {
    JSEP_SET_ERROR("Cannot create offer in state " << GetStateStr(mState));
    return NS_ERROR_UNEXPECTED;
  }

  // Undo track assignments from a previous call to CreateOffer
  // (ie; if the track has not been negotiated yet, it doesn't necessarily need
  // to stay in the same m-section that it was in)
  for (JsepSendingTrack& trackWrapper : mLocalTracks) {
    if (!trackWrapper.mTrack->GetNegotiatedDetails()) {
      trackWrapper.mAssignedMLine.reset();
    }
  }

  UniquePtr<Sdp> sdp;

  // Make the basic SDP that is common to offer/answer.
  nsresult rv = CreateGenericSDP(&sdp);
  NS_ENSURE_SUCCESS(rv, rv);

  if (mCurrentLocalDescription) {
    // Renegotiation: seed the offer with the previous m-sections.
    rv = AddReofferMsections(*mCurrentLocalDescription,
                             *GetAnswer(),
                             sdp.get());
    NS_ENSURE_SUCCESS(rv, rv);
  }

  // Ensure that we have all the m-sections we need, and disable extras
  rv = SetupOfferMSections(options, sdp.get());
  NS_ENSURE_SUCCESS(rv, rv);

  SetupBundle(sdp.get());

  if (mCurrentLocalDescription) {
    rv = CopyPreviousTransportParams(*GetAnswer(),
                                     *mCurrentLocalDescription,
                                     *sdp,
                                     sdp.get());
    NS_ENSURE_SUCCESS(rv,rv);
  }

  *offer = sdp->ToString();
  mGeneratedLocalDescription = Move(sdp);
  ++mSessionVersion;

  return NS_OK;
}
+
+std::string
+JsepSessionImpl::GetLocalDescription() const
+{
+ std::ostringstream os;
+ mozilla::Sdp* sdp = GetParsedLocalDescription();
+ if (sdp) {
+ sdp->Serialize(os);
+ }
+ return os.str();
+}
+
+std::string
+JsepSessionImpl::GetRemoteDescription() const
+{
+ std::ostringstream os;
+ mozilla::Sdp* sdp = GetParsedRemoteDescription();
+ if (sdp) {
+ sdp->Serialize(os);
+ }
+ return os.str();
+}
+
// Attaches this session's RTP header extensions for the msection's media
// type (if any are registered) as an a=extmap attribute list.
void
JsepSessionImpl::AddExtmap(SdpMediaSection* msection) const
{
  const auto* extensions = GetRtpExtensions(msection->GetMediaType());

  if (extensions && !extensions->empty()) {
    SdpExtmapAttributeList* extmap = new SdpExtmapAttributeList;
    extmap->mExtmaps = *extensions;
    msection->GetAttributeList().SetAttribute(extmap);
  }
}
+
// Sets a=mid:|mid| on |msection|.
void
JsepSessionImpl::AddMid(const std::string& mid,
                        SdpMediaSection* msection) const
{
  msection->GetAttributeList().SetAttribute(new SdpStringAttribute(
        SdpAttribute::kMidAttribute, mid));
}
+
+const std::vector<SdpExtmapAttributeList::Extmap>*
+JsepSessionImpl::GetRtpExtensions(SdpMediaSection::MediaType type) const
+{
+ switch (type) {
+ case SdpMediaSection::kAudio:
+ return &mAudioRtpExtensions;
+ case SdpMediaSection::kVideo:
+ return &mVideoRtpExtensions;
+ default:
+ return nullptr;
+ }
+}
+
// For an answer m-section: delegates to SdpHelper to add the extmaps we
// have in common with the remote m-section's offered extensions.
void
JsepSessionImpl::AddCommonExtmaps(const SdpMediaSection& remoteMsection,
                                  SdpMediaSection* msection)
{
  auto* ourExtensions = GetRtpExtensions(remoteMsection.GetMediaType());

  if (ourExtensions) {
    mSdpHelper.AddCommonExtmaps(remoteMsection, *ourExtensions, msection);
  }
}
+
// Generates an SDP answer to mPendingRemoteDescription. Only allowed in
// the have-remote-offer state. The parsed result is stored in
// mGeneratedLocalDescription and the session version is bumped.
nsresult
JsepSessionImpl::CreateAnswer(const JsepAnswerOptions& options,
                              std::string* answer)
{
  mLastError.clear();

  if (mState != kJsepStateHaveRemoteOffer) {
    JSEP_SET_ERROR("Cannot create answer in state " << GetStateStr(mState));
    return NS_ERROR_UNEXPECTED;
  }

  // This is the heart of the negotiation code. Depressing that it's
  // so bad.
  //
  // Here's the current algorithm:
  // 1. Walk through all the m-lines on the other side.
  // 2. For each m-line, walk through all of our local tracks
  //    in sequence and see if any are unassigned. If so, assign
  //    them and mark it sendrecv, otherwise it's recvonly.
  // 3. Just replicate their media attributes.
  // 4. Profit.
  UniquePtr<Sdp> sdp;

  // Make the basic SDP that is common to offer/answer.
  nsresult rv = CreateGenericSDP(&sdp);
  NS_ENSURE_SUCCESS(rv, rv);

  const Sdp& offer = *mPendingRemoteDescription;

  // Copy the bundle groups into our answer
  UniquePtr<SdpGroupAttributeList> groupAttr(new SdpGroupAttributeList);
  mSdpHelper.GetBundleGroups(offer, &groupAttr->mGroups);
  sdp->GetAttributeList().SetAttribute(groupAttr.release());

  // Disable send for local tracks if the offer no longer allows it
  // (i.e., the m-section is recvonly, inactive or disabled)
  for (JsepSendingTrack& trackWrapper : mLocalTracks) {
    if (!trackWrapper.mAssignedMLine.isSome()) {
      continue;
    }

    // Get rid of all m-line assignments that have not been negotiated
    if (!trackWrapper.mTrack->GetNegotiatedDetails()) {
      trackWrapper.mAssignedMLine.reset();
      continue;
    }

    if (!offer.GetMediaSection(*trackWrapper.mAssignedMLine).IsReceiving()) {
      trackWrapper.mAssignedMLine.reset();
    }
  }

  size_t numMsections = offer.GetMediaSectionCount();

  // Build one answer m-section per offered m-section, in order.
  for (size_t i = 0; i < numMsections; ++i) {
    const SdpMediaSection& remoteMsection = offer.GetMediaSection(i);
    rv = CreateAnswerMSection(options, i, remoteMsection, sdp.get());
    NS_ENSURE_SUCCESS(rv, rv);
  }

  if (mCurrentLocalDescription) {
    // per discussion with bwc, 3rd parm here should be offer, not *sdp. (mjf)
    rv = CopyPreviousTransportParams(*GetAnswer(),
                                     *mCurrentRemoteDescription,
                                     offer,
                                     sdp.get());
    NS_ENSURE_SUCCESS(rv,rv);
  }

  *answer = sdp->ToString();
  mGeneratedLocalDescription = Move(sdp);
  ++mSessionVersion;

  return NS_OK;
}
+
// Appends a new m-section of the given type/protocol/direction to |sdp|
// (with placeholder port 0 and address 0.0.0.0) and enables it for an
// offer via EnableOfferMsection.
nsresult
JsepSessionImpl::CreateOfferMSection(SdpMediaSection::MediaType mediatype,
                                     SdpMediaSection::Protocol proto,
                                     SdpDirectionAttribute::Direction dir,
                                     Sdp* sdp)
{
  SdpMediaSection* msection =
      &sdp->AddMediaSection(mediatype, dir, 0, proto, sdp::kIPv4, "0.0.0.0");

  return EnableOfferMsection(msection);
}
+
// Finds an m-section of |type| that no local track is bound to, reviving
// it if it had been disabled; if none exists, appends a fresh one. The
// chosen section is returned through |msectionOut|.
nsresult
JsepSessionImpl::GetFreeMsectionForSend(
    SdpMediaSection::MediaType type,
    Sdp* sdp,
    SdpMediaSection** msectionOut)
{
  for (size_t i = 0; i < sdp->GetMediaSectionCount(); ++i) {
    SdpMediaSection& msection = sdp->GetMediaSection(i);
    // draft-ietf-rtcweb-jsep-08 says we should reclaim disabled m-sections
    // regardless of media type. This breaks some pretty fundamental rules of
    // SDP offer/answer, so we probably should not do it.
    if (msection.GetMediaType() != type) {
      continue;
    }

    if (FindTrackByLevel(mLocalTracks, i) != mLocalTracks.end()) {
      // Not free
      continue;
    }

    if (mSdpHelper.MsectionIsDisabled(msection)) {
      // Was disabled; revive
      nsresult rv = EnableOfferMsection(&msection);
      NS_ENSURE_SUCCESS(rv, rv);
    }

    *msectionOut = &msection;
    return NS_OK;
  }

  // Ok, no pre-existing m-section. Make a new one.
  nsresult rv = CreateOfferMSection(type,
                                    mSdpHelper.GetProtocolForMediaType(type),
                                    SdpDirectionAttribute::kInactive,
                                    sdp);
  NS_ENSURE_SUCCESS(rv, rv);

  *msectionOut = &sdp->GetMediaSection(sdp->GetMediaSectionCount() - 1);
  return NS_OK;
}
+
// Builds the answer m-section mirroring |remoteMsection|: copies sticky
// params, picks our DTLS setup role, adds transport attributes and a
// recvonly SSRC, binds matching local/remote tracks, and disables the
// section when the remote side disabled it or nothing was negotiated.
nsresult
JsepSessionImpl::CreateAnswerMSection(const JsepAnswerOptions& options,
                                      size_t mlineIndex,
                                      const SdpMediaSection& remoteMsection,
                                      Sdp* sdp)
{
  SdpMediaSection& msection =
      sdp->AddMediaSection(remoteMsection.GetMediaType(),
                           SdpDirectionAttribute::kInactive,
                           9,
                           remoteMsection.GetProtocol(),
                           sdp::kIPv4,
                           "0.0.0.0");

  nsresult rv = mSdpHelper.CopyStickyParams(remoteMsection, &msection);
  NS_ENSURE_SUCCESS(rv, rv);

  if (mSdpHelper.MsectionIsDisabled(remoteMsection)) {
    // Remote disabled this m-section; mirror that and stop.
    mSdpHelper.DisableMsection(sdp, &msection);
    return NS_OK;
  }

  SdpSetupAttribute::Role role;
  rv = DetermineAnswererSetupRole(remoteMsection, &role);
  NS_ENSURE_SUCCESS(rv, rv);

  rv = AddTransportAttributes(&msection, role);
  NS_ENSURE_SUCCESS(rv, rv);

  rv = SetRecvonlySsrc(&msection);
  NS_ENSURE_SUCCESS(rv, rv);

  // Only attempt to match up local tracks if the offerer has elected to
  // receive traffic.
  if (remoteMsection.IsReceiving()) {
    rv = BindMatchingLocalTrackToAnswer(&msection);
    NS_ENSURE_SUCCESS(rv, rv);
  }

  if (remoteMsection.IsSending()) {
    BindMatchingRemoteTrackToAnswer(&msection);
  }

  if (!msection.IsReceiving() && !msection.IsSending()) {
    mSdpHelper.DisableMsection(sdp, &msection);
    return NS_OK;
  }

  // Add extmap attributes.
  AddCommonExtmaps(remoteMsection, &msection);

  if (msection.GetFormats().empty()) {
    // Could not negotiate anything. Disable m-section.
    mSdpHelper.DisableMsection(sdp, &msection);
  }

  return NS_OK;
}
+
// Ensures mRecvonlySsrcs has an entry for this m-section's level
// (generating and caching any missing ones) and stamps that SSRC, with the
// session CNAME, onto the msection.
nsresult
JsepSessionImpl::SetRecvonlySsrc(SdpMediaSection* msection)
{
  // If previous m-sections are disabled, we do not call this function for them
  while (mRecvonlySsrcs.size() <= msection->GetLevel()) {
    uint32_t ssrc;
    nsresult rv = CreateSsrc(&ssrc);
    NS_ENSURE_SUCCESS(rv, rv);
    mRecvonlySsrcs.push_back(ssrc);
  }

  std::vector<uint32_t> ssrcs;
  ssrcs.push_back(mRecvonlySsrcs[msection->GetLevel()]);
  msection->SetSsrcs(ssrcs, mCNAME);
  return NS_OK;
}
+
// Binds a local track to an answer m-section the offerer wants to receive
// on: prefer the track already assigned to this level, else any unassigned
// track of the same type; for datachannel, synthesize a local track on
// demand so we can play along with an offered datachannel.
nsresult
JsepSessionImpl::BindMatchingLocalTrackToAnswer(SdpMediaSection* msection)
{
  auto track = FindTrackByLevel(mLocalTracks, msection->GetLevel());

  if (track == mLocalTracks.end()) {
    track = FindUnassignedTrackByType(mLocalTracks, msection->GetMediaType());
  }

  if (track == mLocalTracks.end() &&
      msection->GetMediaType() == SdpMediaSection::kApplication) {
    // If we are offered datachannel, we need to play along even if no track
    // for it has been added yet.
    std::string streamId;
    std::string trackId;

    if (!mUuidGen->Generate(&streamId) || !mUuidGen->Generate(&trackId)) {
      JSEP_SET_ERROR("Failed to generate UUIDs for datachannel track");
      return NS_ERROR_FAILURE;
    }

    AddTrack(RefPtr<JsepTrack>(
          new JsepTrack(SdpMediaSection::kApplication, streamId, trackId)));
    track = FindUnassignedTrackByType(mLocalTracks, msection->GetMediaType());
    MOZ_ASSERT(track != mLocalTracks.end());
  }

  if (track != mLocalTracks.end()) {
    track->mAssignedMLine = Some(msection->GetLevel());
    track->mTrack->AddToAnswer(
        mPendingRemoteDescription->GetMediaSection(msection->GetLevel()),
        msection);
  }

  return NS_OK;
}
+
// Adds the remote track already assigned to this m-section's level into
// our answer; a missing assignment indicates an internal logic error.
nsresult
JsepSessionImpl::BindMatchingRemoteTrackToAnswer(SdpMediaSection* msection)
{
  auto it = FindTrackByLevel(mRemoteTracks, msection->GetLevel());
  if (it == mRemoteTracks.end()) {
    MOZ_ASSERT(false);
    JSEP_SET_ERROR("Failed to find remote track for local answer m-section");
    return NS_ERROR_FAILURE;
  }

  it->mTrack->AddToAnswer(
      mPendingRemoteDescription->GetMediaSection(msection->GetLevel()),
      msection);
  return NS_OK;
}
+
// Chooses our DTLS setup role for an answer from the remote m-section's
// a=setup attribute (default: we are active). Fails on "holdconn".
nsresult
JsepSessionImpl::DetermineAnswererSetupRole(
    const SdpMediaSection& remoteMsection,
    SdpSetupAttribute::Role* rolep)
{
  // Determine the role.
  // RFC 5763 says:
  //
  //   The endpoint MUST use the setup attribute defined in [RFC4145].
  //   The endpoint that is the offerer MUST use the setup attribute
  //   value of setup:actpass and be prepared to receive a client_hello
  //   before it receives the answer.  The answerer MUST use either a
  //   setup attribute value of setup:active or setup:passive.  Note that
  //   if the answerer uses setup:passive, then the DTLS handshake will
  //   not begin until the answerer is received, which adds additional
  //   latency. setup:active allows the answer and the DTLS handshake to
  //   occur in parallel.  Thus, setup:active is RECOMMENDED.  Whichever
  //   party is active MUST initiate a DTLS handshake by sending a
  //   ClientHello over each flow (host/port quartet).
  //
  // We default to assuming that the offerer is passive and we are active.
  SdpSetupAttribute::Role role = SdpSetupAttribute::kActive;

  if (remoteMsection.GetAttributeList().HasAttribute(
          SdpAttribute::kSetupAttribute)) {
    switch (remoteMsection.GetAttributeList().GetSetup().mRole) {
      case SdpSetupAttribute::kActive:
        role = SdpSetupAttribute::kPassive;
        break;
      case SdpSetupAttribute::kPassive:
      case SdpSetupAttribute::kActpass:
        role = SdpSetupAttribute::kActive;
        break;
      case SdpSetupAttribute::kHoldconn:
        // This should have been caught by ParseSdp
        MOZ_ASSERT(false);
        JSEP_SET_ERROR("The other side used an illegal setup attribute"
                       " (\"holdconn\").");
        return NS_ERROR_INVALID_ARG;
    }
  }

  *rolep = role;
  return NS_OK;
}
+
// Applies a local offer/answer, or rolls back a pending local offer.
// Validates the state transition, parses and validates the SDP, rebuilds
// the per-m-section transport objects (saving the old set for rollback),
// then dispatches to the offer/answer-specific handler.
nsresult
JsepSessionImpl::SetLocalDescription(JsepSdpType type, const std::string& sdp)
{
  mLastError.clear();

  MOZ_MTLOG(ML_DEBUG, "SetLocalDescription type=" << type << "\nSDP=\n"
                      << sdp);

  if (type == kJsepSdpRollback) {
    if (mState != kJsepStateHaveLocalOffer) {
      JSEP_SET_ERROR("Cannot rollback local description in "
                     << GetStateStr(mState));
      return NS_ERROR_UNEXPECTED;
    }

    // Discard the pending offer and restore the pre-offer transports.
    mPendingLocalDescription.reset();
    SetState(kJsepStateStable);
    mTransports = mOldTransports;
    mOldTransports.clear();
    return NS_OK;
  }

  // State machine: an offer is only legal in stable; an answer/pranswer is
  // only legal when we hold a remote offer.
  switch (mState) {
    case kJsepStateStable:
      if (type != kJsepSdpOffer) {
        JSEP_SET_ERROR("Cannot set local answer in state "
                       << GetStateStr(mState));
        return NS_ERROR_UNEXPECTED;
      }
      mIsOfferer = true;
      break;
    case kJsepStateHaveRemoteOffer:
      if (type != kJsepSdpAnswer && type != kJsepSdpPranswer) {
        JSEP_SET_ERROR("Cannot set local offer in state "
                       << GetStateStr(mState));
        return NS_ERROR_UNEXPECTED;
      }
      break;
    default:
      JSEP_SET_ERROR("Cannot set local offer or answer in state "
                     << GetStateStr(mState));
      return NS_ERROR_UNEXPECTED;
  }

  UniquePtr<Sdp> parsed;
  nsresult rv = ParseSdp(sdp, &parsed);
  NS_ENSURE_SUCCESS(rv, rv);

  // Check that content hasn't done anything unsupported with the SDP
  rv = ValidateLocalDescription(*parsed);
  NS_ENSURE_SUCCESS(rv, rv);

  // Create transport objects.
  mOldTransports = mTransports; // Save in case we need to rollback
  mTransports.clear();
  for (size_t t = 0; t < parsed->GetMediaSectionCount(); ++t) {
    mTransports.push_back(RefPtr<JsepTransport>(new JsepTransport));
    InitTransport(parsed->GetMediaSection(t), mTransports[t].get());
  }

  switch (type) {
    case kJsepSdpOffer:
      rv = SetLocalDescriptionOffer(Move(parsed));
      break;
    case kJsepSdpAnswer:
    case kJsepSdpPranswer:
      rv = SetLocalDescriptionAnswer(type, Move(parsed));
      break;
    case kJsepSdpRollback:
      MOZ_CRASH(); // Handled above
  }

  return rv;
}
+
// Local offer applied: stable -> have-local-offer.
nsresult
JsepSessionImpl::SetLocalDescriptionOffer(UniquePtr<Sdp> offer)
{
  MOZ_ASSERT(mState == kJsepStateStable);
  mPendingLocalDescription = Move(offer);
  SetState(kJsepStateHaveLocalOffer);
  return NS_OK;
}
+
// Local answer applied: validates it against the pending remote offer,
// runs negotiation, promotes the pending descriptions to current, and
// returns to the stable state.
nsresult
JsepSessionImpl::SetLocalDescriptionAnswer(JsepSdpType type,
                                           UniquePtr<Sdp> answer)
{
  MOZ_ASSERT(mState == kJsepStateHaveRemoteOffer);
  mPendingLocalDescription = Move(answer);

  nsresult rv = ValidateAnswer(*mPendingRemoteDescription,
                               *mPendingLocalDescription);
  NS_ENSURE_SUCCESS(rv, rv);

  rv = HandleNegotiatedSession(mPendingLocalDescription,
                               mPendingRemoteDescription);
  NS_ENSURE_SUCCESS(rv, rv);

  mCurrentRemoteDescription = Move(mPendingRemoteDescription);
  mCurrentLocalDescription = Move(mPendingLocalDescription);
  // Remember which side we were for this negotiation.
  mWasOffererLastTime = mIsOfferer;

  SetState(kJsepStateStable);
  return NS_OK;
}
+
+// Entry point for applying a remote SDP of any type (offer, answer,
+// pranswer, or rollback). Enforces the JSEP state machine, parses and
+// validates the SDP, detects ICE-lite peers and ICE restarts, then
+// dispatches to the type-specific handler. ICE-related session flags are
+// only committed if the type-specific handler succeeds.
+nsresult
+JsepSessionImpl::SetRemoteDescription(JsepSdpType type, const std::string& sdp)
+{
+  mLastError.clear();
+  mRemoteTracksAdded.clear();
+  mRemoteTracksRemoved.clear();
+
+  MOZ_MTLOG(ML_DEBUG, "SetRemoteDescription type=" << type << "\nSDP=\n"
+                      << sdp);
+
+  // Rollback is handled entirely here: discard the pending remote
+  // description and restore the remote track set to its pre-offer state.
+  if (type == kJsepSdpRollback) {
+    if (mState != kJsepStateHaveRemoteOffer) {
+      JSEP_SET_ERROR("Cannot rollback remote description in "
+                     << GetStateStr(mState));
+      return NS_ERROR_UNEXPECTED;
+    }
+
+    mPendingRemoteDescription.reset();
+    SetState(kJsepStateStable);
+
+    // Update the remote tracks to what they were before the SetRemote
+    return SetRemoteTracksFromDescription(mCurrentRemoteDescription.get());
+  }
+
+  // State-machine check: offers only from stable; answers only while an
+  // offer (or provisional answer) is outstanding.
+  switch (mState) {
+    case kJsepStateStable:
+      if (type != kJsepSdpOffer) {
+        JSEP_SET_ERROR("Cannot set remote answer in state "
+                       << GetStateStr(mState));
+        return NS_ERROR_UNEXPECTED;
+      }
+      mIsOfferer = false;
+      break;
+    case kJsepStateHaveLocalOffer:
+    case kJsepStateHaveRemotePranswer:
+      if (type != kJsepSdpAnswer && type != kJsepSdpPranswer) {
+        JSEP_SET_ERROR("Cannot set remote offer in state "
+                       << GetStateStr(mState));
+        return NS_ERROR_UNEXPECTED;
+      }
+      break;
+    default:
+      JSEP_SET_ERROR("Cannot set remote offer or answer in current state "
+                     << GetStateStr(mState));
+      return NS_ERROR_UNEXPECTED;
+  }
+
+  // Parse.
+  UniquePtr<Sdp> parsed;
+  nsresult rv = ParseSdp(sdp, &parsed);
+  NS_ENSURE_SUCCESS(rv, rv);
+
+  rv = ValidateRemoteDescription(*parsed);
+  NS_ENSURE_SUCCESS(rv, rv);
+
+  bool iceLite =
+      parsed->GetAttributeList().HasAttribute(SdpAttribute::kIceLiteAttribute);
+
+  // check for mismatch ufrag/pwd indicating ice restart
+  // can't just check the first one because it might be disabled
+  bool iceRestarting = false;
+  if (mCurrentRemoteDescription.get()) {
+    for (size_t i = 0;
+         !iceRestarting &&
+           i < mCurrentRemoteDescription->GetMediaSectionCount();
+         ++i) {
+
+      const SdpMediaSection& newMsection = parsed->GetMediaSection(i);
+      const SdpMediaSection& oldMsection =
+        mCurrentRemoteDescription->GetMediaSection(i);
+
+      if (mSdpHelper.MsectionIsDisabled(newMsection) ||
+          mSdpHelper.MsectionIsDisabled(oldMsection)) {
+        continue;
+      }
+
+      iceRestarting = mSdpHelper.IceCredentialsDiffer(newMsection, oldMsection);
+    }
+  }
+
+  std::vector<std::string> iceOptions;
+  if (parsed->GetAttributeList().HasAttribute(
+        SdpAttribute::kIceOptionsAttribute)) {
+    iceOptions = parsed->GetAttributeList().GetIceOptions().mValues;
+  }
+
+  switch (type) {
+    case kJsepSdpOffer:
+      rv = SetRemoteDescriptionOffer(Move(parsed));
+      break;
+    case kJsepSdpAnswer:
+    case kJsepSdpPranswer:
+      rv = SetRemoteDescriptionAnswer(type, Move(parsed));
+      break;
+    case kJsepSdpRollback:
+      MOZ_CRASH(); // Handled above
+  }
+
+  // Commit ICE session flags only on success so a failed SetRemote leaves
+  // the session's ICE view untouched.
+  if (NS_SUCCEEDED(rv)) {
+    mRemoteIsIceLite = iceLite;
+    mIceOptions = iceOptions;
+    mRemoteIceIsRestarting = iceRestarting;
+  }
+
+  return rv;
+}
+
+// Core negotiation step once both a local and remote description are in
+// place. Determines ICE role, resolves BUNDLE groups, finalizes one
+// transport per active (non-bundled-slave) m-section, and builds the list
+// of negotiated track pairs consumed by the media pipeline.
+nsresult
+JsepSessionImpl::HandleNegotiatedSession(const UniquePtr<Sdp>& local,
+                                         const UniquePtr<Sdp>& remote)
+{
+  bool remoteIceLite =
+      remote->GetAttributeList().HasAttribute(SdpAttribute::kIceLiteAttribute);
+
+  // We control ICE if we made the offer, or if the peer is ice-lite
+  // (an ice-lite peer is always controlled).
+  mIceControlling = remoteIceLite || mIsOfferer;
+
+  // BUNDLE groups are authoritative in the answer, whichever side made it.
+  const Sdp& answer = mIsOfferer ? *remote : *local;
+
+  SdpHelper::BundledMids bundledMids;
+  nsresult rv = mSdpHelper.GetBundledMids(answer, &bundledMids);
+  NS_ENSURE_SUCCESS(rv, rv);
+
+  if (mTransports.size() < local->GetMediaSectionCount()) {
+    JSEP_SET_ERROR("Fewer transports set up than m-lines");
+    MOZ_ASSERT(false);
+    return NS_ERROR_FAILURE;
+  }
+
+  // Wipe any details from a previous negotiation; they are rebuilt below.
+  for (JsepSendingTrack& trackWrapper : mLocalTracks) {
+    trackWrapper.mTrack->ClearNegotiatedDetails();
+  }
+
+  for (JsepReceivingTrack& trackWrapper : mRemoteTracks) {
+    trackWrapper.mTrack->ClearNegotiatedDetails();
+  }
+
+  std::vector<JsepTrackPair> trackPairs;
+
+  // Now walk through the m-sections, make sure they match, and create
+  // track pairs that describe the media to be set up.
+  for (size_t i = 0; i < local->GetMediaSectionCount(); ++i) {
+    // Skip disabled m-sections.
+    if (answer.GetMediaSection(i).GetPort() == 0) {
+      mTransports[i]->Close();
+      continue;
+    }
+
+    // The transport details are not necessarily on the m-section we're
+    // currently processing.
+    size_t transportLevel = i;
+    bool usingBundle = false;
+    {
+      const SdpMediaSection& answerMsection(answer.GetMediaSection(i));
+      if (answerMsection.GetAttributeList().HasAttribute(
+            SdpAttribute::kMidAttribute)) {
+        if (bundledMids.count(answerMsection.GetAttributeList().GetMid())) {
+          const SdpMediaSection* masterBundleMsection =
+            bundledMids[answerMsection.GetAttributeList().GetMid()];
+          transportLevel = masterBundleMsection->GetLevel();
+          usingBundle = true;
+          // Bundle slaves share the master's transport; close their own.
+          if (i != transportLevel) {
+            mTransports[i]->Close();
+          }
+        }
+      }
+    }
+
+    RefPtr<JsepTransport> transport = mTransports[transportLevel];
+
+    rv = FinalizeTransport(
+        remote->GetMediaSection(transportLevel).GetAttributeList(),
+        answer.GetMediaSection(transportLevel).GetAttributeList(),
+        transport);
+    NS_ENSURE_SUCCESS(rv, rv);
+
+    JsepTrackPair trackPair;
+    rv = MakeNegotiatedTrackPair(remote->GetMediaSection(i),
+                                 local->GetMediaSection(i),
+                                 transport,
+                                 usingBundle,
+                                 transportLevel,
+                                 &trackPair);
+    NS_ENSURE_SUCCESS(rv, rv);
+
+    trackPairs.push_back(trackPair);
+  }
+
+  JsepTrack::SetUniquePayloadTypes(GetTracks(mRemoteTracks));
+
+  // Ouch, this probably needs some dirty bit instead of just clearing
+  // stuff for renegotiation.
+  mNegotiatedTrackPairs = trackPairs;
+
+  // The caller-provided SDP superseded whatever we generated.
+  mGeneratedLocalDescription.reset();
+
+  mNegotiations++;
+  return NS_OK;
+}
+
+// Builds one JsepTrackPair for an active m-section: resolves send/receive
+// directions from the answer (flipped depending on which side we are),
+// attaches the matching local/remote tracks, and wires up the RTP (and,
+// without rtcp-mux, RTCP) transport.
+nsresult
+JsepSessionImpl::MakeNegotiatedTrackPair(const SdpMediaSection& remote,
+                                         const SdpMediaSection& local,
+                                         const RefPtr<JsepTransport>& transport,
+                                         bool usingBundle,
+                                         size_t transportLevel,
+                                         JsepTrackPair* trackPairOut)
+{
+  MOZ_ASSERT(transport->mComponents);
+  const SdpMediaSection& answer = mIsOfferer ? remote : local;
+
+  bool sending;
+  bool receiving;
+
+  // Direction attributes are from the answerer's perspective; as the
+  // offerer, the answer's "send" is what we receive, and vice versa.
+  if (mIsOfferer) {
+    receiving = answer.IsSending();
+    sending = answer.IsReceiving();
+  } else {
+    sending = answer.IsSending();
+    receiving = answer.IsReceiving();
+  }
+
+  MOZ_MTLOG(ML_DEBUG, "Negotiated m= line"
+                          << " index=" << local.GetLevel()
+                          << " type=" << local.GetMediaType()
+                          << " sending=" << sending
+                          << " receiving=" << receiving);
+
+  trackPairOut->mLevel = local.GetLevel();
+
+  MOZ_ASSERT(mRecvonlySsrcs.size() > local.GetLevel(),
+             "Failed to set the default ssrc for an active m-section");
+  trackPairOut->mRecvonlySsrc = mRecvonlySsrcs[local.GetLevel()];
+
+  if (usingBundle) {
+    trackPairOut->mBundleLevel = Some(transportLevel);
+  }
+
+  auto sendTrack = FindTrackByLevel(mLocalTracks, local.GetLevel());
+  if (sendTrack != mLocalTracks.end()) {
+    sendTrack->mTrack->Negotiate(answer, remote);
+    sendTrack->mTrack->SetActive(sending);
+    trackPairOut->mSending = sendTrack->mTrack;
+  } else if (sending) {
+    // Internal inconsistency: we negotiated sending but never created a
+    // local track for this level.
+    JSEP_SET_ERROR("Failed to find local track for level " <<
+                   local.GetLevel()
+                   << " in local SDP. This should never happen.");
+    NS_ASSERTION(false, "Failed to find local track for level");
+    return NS_ERROR_FAILURE;
+  }
+
+  auto recvTrack = FindTrackByLevel(mRemoteTracks, local.GetLevel());
+  if (recvTrack != mRemoteTracks.end()) {
+    recvTrack->mTrack->Negotiate(answer, remote);
+    recvTrack->mTrack->SetActive(receiving);
+    trackPairOut->mReceiving = recvTrack->mTrack;
+
+    // Without ssrcs we cannot demux bundled RTP to this track; warn but
+    // keep going (datachannel doesn't use ssrcs, so it is exempt).
+    if (receiving &&
+        trackPairOut->mBundleLevel.isSome() &&
+        recvTrack->mTrack->GetSsrcs().empty() &&
+        recvTrack->mTrack->GetMediaType() != SdpMediaSection::kApplication) {
+      MOZ_MTLOG(ML_ERROR, "Bundled m-section has no ssrc attributes. "
+                          "This may cause media packets to be dropped.");
+    }
+  } else if (receiving) {
+    JSEP_SET_ERROR("Failed to find remote track for level "
+                   << local.GetLevel()
+                   << " in remote SDP. This should never happen.");
+    NS_ASSERTION(false, "Failed to find remote track for level");
+    return NS_ERROR_FAILURE;
+  }
+
+  trackPairOut->mRtpTransport = transport;
+
+  if (transport->mComponents == 2) {
+    // RTCP MUX or not.
+    // TODO(bug 1095743): verify that the PTs are consistent with mux.
+    MOZ_MTLOG(ML_DEBUG, "RTCP-MUX is off");
+    trackPairOut->mRtcpTransport = transport;
+  }
+
+  return NS_OK;
+}
+
+// Seeds a freshly-created transport from an m-section: closes it if the
+// m-section is disabled, picks 1 or 2 ICE components (RTP only vs
+// RTP+RTCP based on the protocol), and derives a transport id from the
+// mid (or a synthesized level-based name when no mid is present).
+void
+JsepSessionImpl::InitTransport(const SdpMediaSection& msection,
+                               JsepTransport* transport)
+{
+  if (mSdpHelper.MsectionIsDisabled(msection)) {
+    transport->Close();
+    return;
+  }
+
+  if (mSdpHelper.HasRtcp(msection.GetProtocol())) {
+    transport->mComponents = 2;
+  } else {
+    transport->mComponents = 1;
+  }
+
+  if (msection.GetAttributeList().HasAttribute(SdpAttribute::kMidAttribute)) {
+    transport->mTransportId = msection.GetAttributeList().GetMid();
+  } else {
+    std::ostringstream os;
+    os << "level_" << msection.GetLevel() << "(no mid)";
+    transport->mTransportId = os.str();
+  }
+}
+
+// Completes a transport after negotiation: copies the remote ICE
+// credentials/candidates and DTLS fingerprints, resolves our DTLS role
+// from the answer's setup attribute (per RFC 5763/RFC 4145), and drops to
+// one component if the answer requested rtcp-mux.
+nsresult
+JsepSessionImpl::FinalizeTransport(const SdpAttributeList& remote,
+                                   const SdpAttributeList& answer,
+                                   const RefPtr<JsepTransport>& transport)
+{
+  UniquePtr<JsepIceTransport> ice = MakeUnique<JsepIceTransport>();
+
+  // We do sanity-checking for these in ParseSdp
+  ice->mUfrag = remote.GetIceUfrag();
+  ice->mPwd = remote.GetIcePwd();
+  if (remote.HasAttribute(SdpAttribute::kCandidateAttribute)) {
+    ice->mCandidates = remote.GetCandidate();
+  }
+
+  // RFC 5763 says:
+  //
+  //   The endpoint MUST use the setup attribute defined in [RFC4145].
+  //   The endpoint that is the offerer MUST use the setup attribute
+  //   value of setup:actpass and be prepared to receive a client_hello
+  //   before it receives the answer.  The answerer MUST use either a
+  //   setup attribute value of setup:active or setup:passive.  Note that
+  //   if the answerer uses setup:passive, then the DTLS handshake will
+  //   not begin until the answerer is received, which adds additional
+  //   latency. setup:active allows the answer and the DTLS handshake to
+  //   occur in parallel.  Thus, setup:active is RECOMMENDED.  Whichever
+  //   party is active MUST initiate a DTLS handshake by sending a
+  //   ClientHello over each flow (host/port quartet).
+  UniquePtr<JsepDtlsTransport> dtls = MakeUnique<JsepDtlsTransport>();
+  dtls->mFingerprints = remote.GetFingerprint();
+  if (!answer.HasAttribute(mozilla::SdpAttribute::kSetupAttribute)) {
+    // No setup attribute in the answer: fall back to the RFC 4145 default
+    // (offerer passive/server, answerer active/client).
+    dtls->mRole = mIsOfferer ? JsepDtlsTransport::kJsepDtlsServer
+                             : JsepDtlsTransport::kJsepDtlsClient;
+  } else {
+    // The setup attribute describes the answerer's role; ours is the
+    // complement when we are the offerer, and the value itself otherwise.
+    if (mIsOfferer) {
+      dtls->mRole = (answer.GetSetup().mRole == SdpSetupAttribute::kActive)
+                        ? JsepDtlsTransport::kJsepDtlsServer
+                        : JsepDtlsTransport::kJsepDtlsClient;
+    } else {
+      dtls->mRole = (answer.GetSetup().mRole == SdpSetupAttribute::kActive)
+                        ? JsepDtlsTransport::kJsepDtlsClient
+                        : JsepDtlsTransport::kJsepDtlsServer;
+    }
+  }
+
+  transport->mIce = Move(ice);
+  transport->mDtls = Move(dtls);
+
+  if (answer.HasAttribute(SdpAttribute::kRtcpMuxAttribute)) {
+    transport->mComponents = 1;
+  }
+
+  return NS_OK;
+}
+
+// Writes our local ICE credentials (ufrag/pwd) and the given DTLS setup
+// role onto an m-section being generated. Fails if credentials were never
+// established.
+nsresult
+JsepSessionImpl::AddTransportAttributes(SdpMediaSection* msection,
+                                        SdpSetupAttribute::Role dtlsRole)
+{
+  if (mIceUfrag.empty() || mIcePwd.empty()) {
+    JSEP_SET_ERROR("Missing ICE ufrag or password");
+    return NS_ERROR_FAILURE;
+  }
+
+  SdpAttributeList& attrList = msection->GetAttributeList();
+  attrList.SetAttribute(
+      new SdpStringAttribute(SdpAttribute::kIceUfragAttribute, mIceUfrag));
+  attrList.SetAttribute(
+      new SdpStringAttribute(SdpAttribute::kIcePwdAttribute, mIcePwd));
+
+  msection->GetAttributeList().SetAttribute(new SdpSetupAttribute(dtlsRole));
+
+  return NS_OK;
+}
+
+// During renegotiation, carries forward transport parameters (candidates,
+// defaults, etc.) from the current local description into newLocal for
+// every m-section where the old transport is still valid — unless the
+// remote side is performing an ICE restart, which invalidates them all.
+nsresult
+JsepSessionImpl::CopyPreviousTransportParams(const Sdp& oldAnswer,
+                                             const Sdp& offerersPreviousSdp,
+                                             const Sdp& newOffer,
+                                             Sdp* newLocal)
+{
+  for (size_t i = 0; i < oldAnswer.GetMediaSectionCount(); ++i) {
+    if (!mSdpHelper.MsectionIsDisabled(newLocal->GetMediaSection(i)) &&
+        mSdpHelper.AreOldTransportParamsValid(oldAnswer,
+                                              offerersPreviousSdp,
+                                              newOffer,
+                                              i) &&
+        !mRemoteIceIsRestarting
+       ) {
+      // If newLocal is an offer, this will be the number of components we used
+      // last time, and if it is an answer, this will be the number of
+      // components we've decided we're using now.
+      size_t numComponents = mTransports[i]->mComponents;
+      nsresult rv = mSdpHelper.CopyTransportParams(
+          numComponents,
+          mCurrentLocalDescription->GetMediaSection(i),
+          &newLocal->GetMediaSection(i));
+      NS_ENSURE_SUCCESS(rv, rv);
+    }
+  }
+
+  return NS_OK;
+}
+
+// Parses an SDP string and enforces the JSEP requirements this
+// implementation relies on: at least one m-section, and (for each enabled
+// m-section) ice-ufrag/ice-pwd, a usable fingerprint, no setup:holdconn,
+// parseable payload types within the RTP range, and globally unique track
+// ids. On success, ownership of the parsed Sdp goes to *parsedp.
+nsresult
+JsepSessionImpl::ParseSdp(const std::string& sdp, UniquePtr<Sdp>* parsedp)
+{
+  UniquePtr<Sdp> parsed = mParser.Parse(sdp);
+  if (!parsed) {
+    std::string error = "Failed to parse SDP: ";
+    mSdpHelper.appendSdpParseErrors(mParser.GetParseErrors(), &error);
+    JSEP_SET_ERROR(error);
+    return NS_ERROR_INVALID_ARG;
+  }
+
+  // Verify that the JSEP rules for all SDP are followed
+  if (!parsed->GetMediaSectionCount()) {
+    JSEP_SET_ERROR("Description has no media sections");
+    return NS_ERROR_INVALID_ARG;
+  }
+
+  // Used to reject duplicate track ids across m-sections.
+  std::set<std::string> trackIds;
+
+  for (size_t i = 0; i < parsed->GetMediaSectionCount(); ++i) {
+    if (mSdpHelper.MsectionIsDisabled(parsed->GetMediaSection(i))) {
+      // Disabled, let this stuff slide.
+      continue;
+    }
+
+    const SdpMediaSection& msection(parsed->GetMediaSection(i));
+    auto& mediaAttrs = msection.GetAttributeList();
+
+    if (mediaAttrs.GetIceUfrag().empty()) {
+      JSEP_SET_ERROR("Invalid description, no ice-ufrag attribute");
+      return NS_ERROR_INVALID_ARG;
+    }
+
+    if (mediaAttrs.GetIcePwd().empty()) {
+      JSEP_SET_ERROR("Invalid description, no ice-pwd attribute");
+      return NS_ERROR_INVALID_ARG;
+    }
+
+    if (!mediaAttrs.HasAttribute(SdpAttribute::kFingerprintAttribute)) {
+      JSEP_SET_ERROR("Invalid description, no fingerprint attribute");
+      return NS_ERROR_INVALID_ARG;
+    }
+
+    const SdpFingerprintAttributeList& fingerprints(
+        mediaAttrs.GetFingerprint());
+    if (fingerprints.mFingerprints.empty()) {
+      JSEP_SET_ERROR("Invalid description, no supported fingerprint algorithms "
+                     "present");
+      return NS_ERROR_INVALID_ARG;
+    }
+
+    if (mediaAttrs.HasAttribute(SdpAttribute::kSetupAttribute) &&
+        mediaAttrs.GetSetup().mRole == SdpSetupAttribute::kHoldconn) {
+      JSEP_SET_ERROR("Description has illegal setup attribute "
+                     "\"holdconn\" at level "
+                     << i);
+      return NS_ERROR_INVALID_ARG;
+    }
+
+    auto& formats = parsed->GetMediaSection(i).GetFormats();
+    for (auto f = formats.begin(); f != formats.end(); ++f) {
+      uint16_t pt;
+      if (!SdpHelper::GetPtAsInt(*f, &pt)) {
+        JSEP_SET_ERROR("Payload type \""
+                       << *f << "\" is not a 16-bit unsigned int at level "
+                       << i);
+        return NS_ERROR_INVALID_ARG;
+      }
+    }
+
+    std::string streamId;
+    std::string trackId;
+    nsresult rv = mSdpHelper.GetIdsFromMsid(*parsed,
+                                            parsed->GetMediaSection(i),
+                                            &streamId,
+                                            &trackId);
+
+    if (NS_SUCCEEDED(rv)) {
+      if (trackIds.count(trackId)) {
+        JSEP_SET_ERROR("track id:" << trackId
+                       << " appears in more than one m-section at level " << i);
+        return NS_ERROR_INVALID_ARG;
+      }
+
+      trackIds.insert(trackId);
+    } else if (rv != NS_ERROR_NOT_AVAILABLE) {
+      // Error has already been set
+      return rv;
+    }
+
+    // NS_ERROR_NOT_AVAILABLE (no msid) is fine; msid is optional here.
+
+    static const std::bitset<128> forbidden = GetForbiddenSdpPayloadTypes();
+    if (msection.GetMediaType() == SdpMediaSection::kAudio ||
+        msection.GetMediaType() == SdpMediaSection::kVideo) {
+      // Sanity-check that payload type can work with RTP
+      for (const std::string& fmt : msection.GetFormats()) {
+        uint16_t payloadType;
+        // TODO (bug 1204099): Make this check for reserved ranges.
+        if (!SdpHelper::GetPtAsInt(fmt, &payloadType) || payloadType > 127) {
+          JSEP_SET_ERROR("audio/video payload type is too large: " << fmt);
+          return NS_ERROR_INVALID_ARG;
+        }
+        if (forbidden.test(payloadType)) {
+          JSEP_SET_ERROR("Illegal audio/video payload type: " << fmt);
+          return NS_ERROR_INVALID_ARG;
+        }
+      }
+    }
+  }
+
+  *parsedp = Move(parsed);
+  return NS_OK;
+}
+
+// Installs a remote offer: materializes remote tracks from the offer's
+// m-sections, stores it as the pending remote description, and moves to
+// have-remote-offer.
+nsresult
+JsepSessionImpl::SetRemoteDescriptionOffer(UniquePtr<Sdp> offer)
+{
+  MOZ_ASSERT(mState == kJsepStateStable);
+
+  // TODO(bug 1095780): Note that we create remote tracks even when
+  // They contain only codecs we can't negotiate or other craziness.
+  nsresult rv = SetRemoteTracksFromDescription(offer.get());
+  NS_ENSURE_SUCCESS(rv, rv);
+
+  mPendingRemoteDescription = Move(offer);
+
+  SetState(kJsepStateHaveRemoteOffer);
+  return NS_OK;
+}
+
+// Installs a remote (pr)answer: validates it against our pending local
+// offer, updates the remote track set, negotiates, and promotes both
+// pending descriptions to current, returning to the stable state.
+nsresult
+JsepSessionImpl::SetRemoteDescriptionAnswer(JsepSdpType type,
+                                            UniquePtr<Sdp> answer)
+{
+  MOZ_ASSERT(mState == kJsepStateHaveLocalOffer ||
+             mState == kJsepStateHaveRemotePranswer);
+
+  mPendingRemoteDescription = Move(answer);
+
+  nsresult rv = ValidateAnswer(*mPendingLocalDescription,
+                               *mPendingRemoteDescription);
+  NS_ENSURE_SUCCESS(rv, rv);
+
+  // TODO(bug 1095780): Note that this creates remote tracks even if
+  // we offered sendonly and other side offered sendrecv or recvonly.
+  rv = SetRemoteTracksFromDescription(mPendingRemoteDescription.get());
+  NS_ENSURE_SUCCESS(rv, rv);
+
+  rv = HandleNegotiatedSession(mPendingLocalDescription,
+                               mPendingRemoteDescription);
+  NS_ENSURE_SUCCESS(rv, rv);
+
+  mCurrentRemoteDescription = Move(mPendingRemoteDescription);
+  mCurrentLocalDescription = Move(mPendingLocalDescription);
+  mWasOffererLastTime = mIsOfferer;
+
+  SetState(kJsepStateStable);
+  return NS_OK;
+}
+
+// Reconciles mRemoteTracks with a remote description (which may be null
+// when rolling back the very first remote offer): re-assigns existing
+// tracks to their m-lines, creates new tracks for sending m-sections we
+// haven't seen, and moves tracks with no matching m-line into
+// mRemoteTracksRemoved.
+nsresult
+JsepSessionImpl::SetRemoteTracksFromDescription(const Sdp* remoteDescription)
+{
+  // Unassign all remote tracks
+  for (auto i = mRemoteTracks.begin(); i != mRemoteTracks.end(); ++i) {
+    i->mAssignedMLine.reset();
+  }
+
+  // This will not exist if we're rolling back the first remote description
+  if (remoteDescription) {
+    size_t numMlines = remoteDescription->GetMediaSectionCount();
+    nsresult rv;
+
+    // Iterate over the sdp, re-assigning or creating remote tracks as we go
+    for (size_t i = 0; i < numMlines; ++i) {
+      const SdpMediaSection& msection = remoteDescription->GetMediaSection(i);
+
+      if (mSdpHelper.MsectionIsDisabled(msection) || !msection.IsSending()) {
+        continue;
+      }
+
+      std::vector<JsepReceivingTrack>::iterator track;
+
+      if (msection.GetMediaType() == SdpMediaSection::kApplication) {
+        // Datachannel doesn't have msid, just search by type
+        track = FindUnassignedTrackByType(mRemoteTracks,
+                                          msection.GetMediaType());
+      } else {
+        std::string streamId;
+        std::string trackId;
+        rv = GetRemoteIds(*remoteDescription, msection, &streamId, &trackId);
+        NS_ENSURE_SUCCESS(rv, rv);
+
+        track = FindTrackByIds(mRemoteTracks, streamId, trackId);
+      }
+
+      if (track == mRemoteTracks.end()) {
+        // NOTE(review): this inner |track| (a RefPtr) shadows the iterator
+        // |track| above; the shadowing is benign here but easy to misread.
+        RefPtr<JsepTrack> track;
+        rv = CreateReceivingTrack(i, *remoteDescription, msection, &track);
+        NS_ENSURE_SUCCESS(rv, rv);
+
+        JsepReceivingTrack rtrack;
+        rtrack.mTrack = track;
+        rtrack.mAssignedMLine = Some(i);
+        mRemoteTracks.push_back(rtrack);
+        mRemoteTracksAdded.push_back(rtrack);
+      } else {
+        track->mAssignedMLine = Some(i);
+      }
+    }
+  }
+
+  // Remove any unassigned remote track ids
+  for (size_t i = 0; i < mRemoteTracks.size();) {
+    if (!mRemoteTracks[i].mAssignedMLine.isSome()) {
+      mRemoteTracksRemoved.push_back(mRemoteTracks[i]);
+      mRemoteTracks.erase(mRemoteTracks.begin() + i);
+    } else {
+      // Only advance when nothing was erased, since erase shifts elements.
+      ++i;
+    }
+  }
+
+  return NS_OK;
+}
+
+// Sanity-checks an SDP handed to SetLocal against the one we generated in
+// CreateOffer/CreateAnswer: content may not add/remove/retype m-sections,
+// may not inject candidate or end-of-candidates attributes on a first
+// negotiation, and may not enable ice-lite.
+nsresult
+JsepSessionImpl::ValidateLocalDescription(const Sdp& description)
+{
+  // TODO(bug 1095226): Better checking.
+  if (!mGeneratedLocalDescription) {
+    JSEP_SET_ERROR("Calling SetLocal without first calling CreateOffer/Answer"
+                   " is not supported.");
+    return NS_ERROR_UNEXPECTED;
+  }
+
+  if (description.GetMediaSectionCount() !=
+      mGeneratedLocalDescription->GetMediaSectionCount()) {
+    JSEP_SET_ERROR("Changing the number of m-sections is not allowed");
+    return NS_ERROR_INVALID_ARG;
+  }
+
+  for (size_t i = 0; i < description.GetMediaSectionCount(); ++i) {
+    auto& origMsection = mGeneratedLocalDescription->GetMediaSection(i);
+    auto& finalMsection = description.GetMediaSection(i);
+    if (origMsection.GetMediaType() != finalMsection.GetMediaType()) {
+      JSEP_SET_ERROR("Changing the media-type of m-sections is not allowed");
+      return NS_ERROR_INVALID_ARG;
+    }
+
+    // These will be present in reoffer
+    if (!mCurrentLocalDescription) {
+      if (finalMsection.GetAttributeList().HasAttribute(
+            SdpAttribute::kCandidateAttribute)) {
+        JSEP_SET_ERROR("Adding your own candidate attributes is not supported");
+        return NS_ERROR_INVALID_ARG;
+      }
+
+      if (finalMsection.GetAttributeList().HasAttribute(
+            SdpAttribute::kEndOfCandidatesAttribute)) {
+        JSEP_SET_ERROR("Why are you trying to set a=end-of-candidates?");
+        return NS_ERROR_INVALID_ARG;
+      }
+    }
+
+    // TODO(bug 1095218): Check msid
+    // TODO(bug 1095226): Check ice-ufrag and ice-pwd
+    // TODO(bug 1095226): Check fingerprints
+    // TODO(bug 1095226): Check payload types (at least ensure that payload
+    // types we don't actually support weren't added)
+    // TODO(bug 1095226): Check ice-options?
+  }
+
+  if (description.GetAttributeList().HasAttribute(
+        SdpAttribute::kIceLiteAttribute)) {
+    JSEP_SET_ERROR("Running ICE in lite mode is unsupported");
+    return NS_ERROR_INVALID_ARG;
+  }
+
+  return NS_OK;
+}
+
+// Validates a remote description during renegotiation (no-op on first
+// negotiation): the new SDP may not shrink the m-section count or change
+// media types, and ICE credentials must change on all-or-none of the
+// enabled m-sections (partial ICE restart is unsupported).
+nsresult
+JsepSessionImpl::ValidateRemoteDescription(const Sdp& description)
+{
+  if (!mCurrentRemoteDescription || !mCurrentLocalDescription) {
+    // Not renegotiation; checks for whether a remote answer are consistent
+    // with our offer are handled in ValidateAnswer()
+    return NS_OK;
+  }
+
+  if (mCurrentRemoteDescription->GetMediaSectionCount() >
+      description.GetMediaSectionCount()) {
+    JSEP_SET_ERROR("New remote description has fewer m-sections than the "
+                   "previous remote description.");
+    return NS_ERROR_INVALID_ARG;
+  }
+
+  // These are solely to check that bundle is valid
+  SdpHelper::BundledMids bundledMids;
+  nsresult rv = GetNegotiatedBundledMids(&bundledMids);
+  NS_ENSURE_SUCCESS(rv, rv);
+
+  SdpHelper::BundledMids newBundledMids;
+  rv = mSdpHelper.GetBundledMids(description, &newBundledMids);
+  NS_ENSURE_SUCCESS(rv, rv);
+
+  // check for partial ice restart, which is not supported
+  // Nothing until the first enabled msection is seen; then it latches to
+  // whether that msection's creds changed, and all others must agree.
+  Maybe<bool> iceCredsDiffer;
+  for (size_t i = 0;
+       i < mCurrentRemoteDescription->GetMediaSectionCount();
+       ++i) {
+
+    const SdpMediaSection& newMsection = description.GetMediaSection(i);
+    const SdpMediaSection& oldMsection =
+      mCurrentRemoteDescription->GetMediaSection(i);
+
+    if (mSdpHelper.MsectionIsDisabled(newMsection) ||
+        mSdpHelper.MsectionIsDisabled(oldMsection)) {
+      continue;
+    }
+
+    if (oldMsection.GetMediaType() != newMsection.GetMediaType()) {
+      JSEP_SET_ERROR("Remote description changes the media type of m-line "
+                     << i);
+      return NS_ERROR_INVALID_ARG;
+    }
+
+    bool differ = mSdpHelper.IceCredentialsDiffer(newMsection, oldMsection);
+    // Detect whether all the creds are the same or all are different
+    if (!iceCredsDiffer.isSome()) {
+      // for the first msection capture whether creds are different or same
+      iceCredsDiffer = mozilla::Some(differ);
+    } else if (iceCredsDiffer.isSome() && *iceCredsDiffer != differ) {
+      // subsequent msections must match the first sections
+      JSEP_SET_ERROR("Partial ICE restart is unsupported at this time "
+                     "(new remote description changes either the ice-ufrag "
+                     "or ice-pwd on fewer than all msections)");
+      return NS_ERROR_INVALID_ARG;
+    }
+  }
+
+  return NS_OK;
+}
+
+// Checks that an answer is a legal response to an offer: identical
+// m-section count and media types, directions no broader than the offer
+// permitted, and stable mids per m-line.
+nsresult
+JsepSessionImpl::ValidateAnswer(const Sdp& offer, const Sdp& answer)
+{
+  if (offer.GetMediaSectionCount() != answer.GetMediaSectionCount()) {
+    JSEP_SET_ERROR("Offer and answer have different number of m-lines "
+                   << "(" << offer.GetMediaSectionCount() << " vs "
+                   << answer.GetMediaSectionCount() << ")");
+    return NS_ERROR_INVALID_ARG;
+  }
+
+  for (size_t i = 0; i < offer.GetMediaSectionCount(); ++i) {
+    const SdpMediaSection& offerMsection = offer.GetMediaSection(i);
+    const SdpMediaSection& answerMsection = answer.GetMediaSection(i);
+
+    if (offerMsection.GetMediaType() != answerMsection.GetMediaType()) {
+      JSEP_SET_ERROR(
+          "Answer and offer have different media types at m-line " << i);
+      return NS_ERROR_INVALID_ARG;
+    }
+
+    // An answer may narrow a direction, never widen it.
+    if (!offerMsection.IsSending() && answerMsection.IsReceiving()) {
+      JSEP_SET_ERROR("Answer tried to set recv when offer did not set send");
+      return NS_ERROR_INVALID_ARG;
+    }
+
+    if (!offerMsection.IsReceiving() && answerMsection.IsSending()) {
+      JSEP_SET_ERROR("Answer tried to set send when offer did not set recv");
+      return NS_ERROR_INVALID_ARG;
+    }
+
+    const SdpAttributeList& answerAttrs(answerMsection.GetAttributeList());
+    const SdpAttributeList& offerAttrs(offerMsection.GetAttributeList());
+    if (answerAttrs.HasAttribute(SdpAttribute::kMidAttribute) &&
+        offerAttrs.HasAttribute(SdpAttribute::kMidAttribute) &&
+        offerAttrs.GetMid() != answerAttrs.GetMid()) {
+      JSEP_SET_ERROR("Answer changes mid for level, was \'"
+                     << offerMsection.GetAttributeList().GetMid()
+                     << "\', now \'"
+                     << answerMsection.GetAttributeList().GetMid() << "\'");
+      return NS_ERROR_INVALID_ARG;
+    }
+  }
+
+  return NS_OK;
+}
+
+// Builds a new receive-direction JsepTrack for an m-section, using the
+// stream/track ids from the remote SDP (or our defaults, per
+// GetRemoteIds), and populates it with our supported codec list.
+nsresult
+JsepSessionImpl::CreateReceivingTrack(size_t mline,
+                                      const Sdp& sdp,
+                                      const SdpMediaSection& msection,
+                                      RefPtr<JsepTrack>* track)
+{
+  std::string streamId;
+  std::string trackId;
+
+  nsresult rv = GetRemoteIds(sdp, msection, &streamId, &trackId);
+  NS_ENSURE_SUCCESS(rv, rv);
+
+  *track = new JsepTrack(msection.GetMediaType(),
+                         streamId,
+                         trackId,
+                         sdp::kRecv);
+
+  (*track)->SetCNAME(mSdpHelper.GetCNAME(msection));
+  (*track)->PopulateCodecs(mSupportedCodecs.values);
+
+  return NS_OK;
+}
+
+// Creates the skeleton SDP shared by offers and answers: a JSEP-compliant
+// o= line (random session id, 0.0.0.0 address to avoid leaking the local
+// address), session-level DTLS fingerprints, ice-options:trickle, and a
+// wildcard msid-semantic.
+nsresult
+JsepSessionImpl::CreateGenericSDP(UniquePtr<Sdp>* sdpp)
+{
+  // draft-ietf-rtcweb-jsep-08 Section 5.2.1:
+  //  o  The second SDP line MUST be an "o=" line, as specified in
+  //     [RFC4566], Section 5.2.  The value of the <username> field SHOULD
+  //     be "-".  The value of the <sess-id> field SHOULD be a
+  //     cryptographically random number.  To ensure uniqueness, this
+  //     number SHOULD be at least 64 bits long.  The value of the <sess-
+  //     version> field SHOULD be zero.  The value of the <nettype>
+  //     <addrtype> <unicast-address> tuple SHOULD be set to a non-
+  //     meaningful address, such as IN IP4 0.0.0.0, to prevent leaking the
+  //     local address in this field.  As mentioned in [RFC4566], the
+  //     entire o= line needs to be unique, but selecting a random number
+  //     for <sess-id> is sufficient to accomplish this.
+
+  auto origin =
+      SdpOrigin("mozilla...THIS_IS_SDPARTA-" MOZ_APP_UA_VERSION,
+                mSessionId,
+                mSessionVersion,
+                sdp::kIPv4,
+                "0.0.0.0");
+
+  UniquePtr<Sdp> sdp = MakeUnique<SipccSdp>(origin);
+
+  if (mDtlsFingerprints.empty()) {
+    JSEP_SET_ERROR("Missing DTLS fingerprint");
+    return NS_ERROR_FAILURE;
+  }
+
+  UniquePtr<SdpFingerprintAttributeList> fpl =
+      MakeUnique<SdpFingerprintAttributeList>();
+  for (auto fp = mDtlsFingerprints.begin(); fp != mDtlsFingerprints.end();
+       ++fp) {
+    fpl->PushEntry(fp->mAlgorithm, fp->mValue);
+  }
+  sdp->GetAttributeList().SetAttribute(fpl.release());
+
+  auto* iceOpts = new SdpOptionsAttribute(SdpAttribute::kIceOptionsAttribute);
+  iceOpts->PushEntry("trickle");
+  sdp->GetAttributeList().SetAttribute(iceOpts);
+
+  // This assumes content doesn't add a bunch of msid attributes with a
+  // different semantic in mind.
+  std::vector<std::string> msids;
+  msids.push_back("*");
+  mSdpHelper.SetupMsidSemantic(msids, sdp.get());
+
+  *sdpp = Move(sdp);
+  return NS_OK;
+}
+
+// One-time initialization of session-scoped identifiers: a random SDP
+// session id (constrained to a positive signed 64-bit value per RFC 3264),
+// a default uuid for remote streams lacking msid, and our RTCP CNAME.
+nsresult
+JsepSessionImpl::SetupIds()
+{
+  SECStatus rv = PK11_GenerateRandom(
+      reinterpret_cast<unsigned char*>(&mSessionId), sizeof(mSessionId));
+  // RFC 3264 says that session-ids MUST be representable as a _signed_
+  // 64 bit number, meaning the MSB cannot be set.
+  // NOTE(review): mSessionId is shifted before rv is checked; on failure
+  // the value is discarded anyway, but checking rv first would be cleaner.
+  mSessionId = mSessionId >> 1;
+  if (rv != SECSuccess) {
+    JSEP_SET_ERROR("Failed to generate session id: " << rv);
+    return NS_ERROR_FAILURE;
+  }
+
+  if (!mUuidGen->Generate(&mDefaultRemoteStreamId)) {
+    JSEP_SET_ERROR("Failed to generate default uuid for streams");
+    return NS_ERROR_FAILURE;
+  }
+
+  if (!mUuidGen->Generate(&mCNAME)) {
+    JSEP_SET_ERROR("Failed to generate CNAME");
+    return NS_ERROR_FAILURE;
+  }
+
+  return NS_OK;
+}
+
+// Generates a random SSRC that is unique within this session (loops on
+// collision with mSsrcs) and records it so later calls can't reuse it.
+nsresult
+JsepSessionImpl::CreateSsrc(uint32_t* ssrc)
+{
+  do {
+    SECStatus rv = PK11_GenerateRandom(
+        reinterpret_cast<unsigned char*>(ssrc), sizeof(uint32_t));
+    if (rv != SECSuccess) {
+      JSEP_SET_ERROR("Failed to generate SSRC, error=" << rv);
+      return NS_ERROR_FAILURE;
+    }
+  } while (mSsrcs.count(*ssrc));
+  mSsrcs.insert(*ssrc);
+
+  return NS_OK;
+}
+
+// Populates mSupportedCodecs with the built-in audio (Opus, G722, PCMU,
+// PCMA, telephone-event), video (VP8, VP9, H264 mode 1/0, red, ulpfec),
+// and datachannel codecs. Ordering matters: it sets the priority used
+// when building offers.
+void
+JsepSessionImpl::SetupDefaultCodecs()
+{
+  // Supported audio codecs.
+  // Per jmspeex on IRC:
+  // For 32KHz sampling, 28 is ok, 32 is good, 40 should be really good
+  // quality.  Note that 1-2Kbps will be wasted on a stereo Opus channel
+  // with mono input compared to configuring it for mono.
+  // If we reduce bitrate enough Opus will low-pass us; 16000 will kill a
+  // 9KHz tone.  This should be adaptive when we're at the low-end of video
+  // bandwidth (say <100Kbps), and if we're audio-only, down to 8 or
+  // 12Kbps.
+  mSupportedCodecs.values.push_back(new JsepAudioCodecDescription(
+      "109",
+      "opus",
+      48000,
+      2,
+      960,
+#ifdef WEBRTC_GONK
+      // TODO Move this elsewhere to be adaptive to rate - Bug 1207925
+      16000 // B2G uses lower capture sampling rate
+#else
+      40000
+#endif
+      ));
+
+  mSupportedCodecs.values.push_back(new JsepAudioCodecDescription(
+      "9",
+      "G722",
+      8000,
+      1,
+      320,
+      64000));
+
+  // packet size and bitrate values below copied from sipcc.
+  // May need reevaluation from a media expert.
+  mSupportedCodecs.values.push_back(
+      new JsepAudioCodecDescription("0",
+                                    "PCMU",
+                                    8000,
+                                    1,
+                                    8000 / 50,   // frequency / 50
+                                    8 * 8000 * 1 // 8 * frequency * channels
+                                    ));
+
+  mSupportedCodecs.values.push_back(
+      new JsepAudioCodecDescription("8",
+                                    "PCMA",
+                                    8000,
+                                    1,
+                                    8000 / 50,   // frequency / 50
+                                    8 * 8000 * 1 // 8 * frequency * channels
+                                    ));
+
+  // note: because telephone-event is effectively a marker codec that indicates
+  // that dtmf rtp packets may be passed, the packetSize and bitRate fields
+  // don't make sense here.  For now, use zero. (mjf)
+  mSupportedCodecs.values.push_back(
+      new JsepAudioCodecDescription("101",
+                                    "telephone-event",
+                                    8000,
+                                    1,
+                                    0, // packetSize doesn't make sense here
+                                    0  // bitRate doesn't make sense here
+                                    ));
+
+  // Supported video codecs.
+  // Note: order here implies priority for building offers!
+  JsepVideoCodecDescription* vp8 = new JsepVideoCodecDescription(
+      "120",
+      "VP8",
+      90000
+      );
+  // Defaults for mandatory params
+  vp8->mConstraints.maxFs = 12288; // Enough for 2048x1536
+  vp8->mConstraints.maxFps = 60;
+  mSupportedCodecs.values.push_back(vp8);
+
+  JsepVideoCodecDescription* vp9 = new JsepVideoCodecDescription(
+      "121",
+      "VP9",
+      90000
+      );
+  // Defaults for mandatory params
+  vp9->mConstraints.maxFs = 12288; // Enough for 2048x1536
+  vp9->mConstraints.maxFps = 60;
+  mSupportedCodecs.values.push_back(vp9);
+
+  JsepVideoCodecDescription* h264_1 = new JsepVideoCodecDescription(
+      "126",
+      "H264",
+      90000
+      );
+  h264_1->mPacketizationMode = 1;
+  // Defaults for mandatory params
+  h264_1->mProfileLevelId = 0x42E00D;
+  mSupportedCodecs.values.push_back(h264_1);
+
+  JsepVideoCodecDescription* h264_0 = new JsepVideoCodecDescription(
+      "97",
+      "H264",
+      90000
+      );
+  h264_0->mPacketizationMode = 0;
+  // Defaults for mandatory params
+  h264_0->mProfileLevelId = 0x42E00D;
+  mSupportedCodecs.values.push_back(h264_0);
+
+  JsepVideoCodecDescription* red = new JsepVideoCodecDescription(
+      "122", // payload type
+      "red", // codec name
+      90000  // clock rate (match other video codecs)
+      );
+  mSupportedCodecs.values.push_back(red);
+
+  JsepVideoCodecDescription* ulpfec = new JsepVideoCodecDescription(
+      "123",    // payload type
+      "ulpfec", // codec name
+      90000     // clock rate (match other video codecs)
+      );
+  mSupportedCodecs.values.push_back(ulpfec);
+
+  mSupportedCodecs.values.push_back(new JsepApplicationCodecDescription(
+      "5000",
+      "webrtc-datachannel",
+      WEBRTC_DATACHANNEL_STREAMS_DEFAULT
+      ));
+
+  // Update the redundant encodings for the RED codec with the supported
+  // codecs.  Note: only uses the video codecs.
+  red->UpdateRedundantEncodings(mSupportedCodecs.values);
+}
+
+// Registers the default RTP header extensions offered by this session;
+// currently only send-direction audio-level (RFC 6464).
+void
+JsepSessionImpl::SetupDefaultRtpExtensions()
+{
+  AddAudioRtpExtension("urn:ietf:params:rtp-hdrext:ssrc-audio-level",
+                       SdpDirectionAttribute::Direction::kSendonly);
+}
+
+// Transitions the JSEP signaling state, logging every actual change;
+// no-op (and no log) when the state is unchanged.
+void
+JsepSessionImpl::SetState(JsepSignalingState state)
+{
+  if (state == mState)
+    return;
+
+  MOZ_MTLOG(ML_NOTICE, "[" << mName << "]: " <<
+            GetStateStr(mState) << " -> " << GetStateStr(state));
+  mState = state;
+}
+
+// Adds a trickled remote ICE candidate to the parsed remote description
+// (pending or current, whichever is active); fails if no remote
+// description exists yet.
+nsresult
+JsepSessionImpl::AddRemoteIceCandidate(const std::string& candidate,
+                                       const std::string& mid,
+                                       uint16_t level)
+{
+  mLastError.clear();
+
+  mozilla::Sdp* sdp = GetParsedRemoteDescription();
+
+  if (!sdp) {
+    JSEP_SET_ERROR("Cannot add ICE candidate in state " << GetStateStr(mState));
+    return NS_ERROR_UNEXPECTED;
+  }
+
+  return mSdpHelper.AddCandidateToSdp(sdp, candidate, mid, level);
+}
+
+// Adds a locally-gathered ICE candidate to the local description.
+// On success, |mid| is filled with the m-section's mid and |skipped| tells
+// the caller whether the candidate was actually written: candidates for
+// out-of-range levels, or for bundled (slave) m-sections after negotiation,
+// are skipped rather than treated as errors.
+nsresult
+JsepSessionImpl::AddLocalIceCandidate(const std::string& candidate,
+                                      uint16_t level,
+                                      std::string* mid,
+                                      bool* skipped)
+{
+  mLastError.clear();
+
+  mozilla::Sdp* sdp = GetParsedLocalDescription();
+
+  if (!sdp) {
+    JSEP_SET_ERROR("Cannot add ICE candidate in state " << GetStateStr(mState));
+    return NS_ERROR_UNEXPECTED;
+  }
+
+  if (sdp->GetMediaSectionCount() <= level) {
+    // mainly here to make some testing less complicated, but also just in case
+    *skipped = true;
+    return NS_OK;
+  }
+
+  if (mState == kJsepStateStable) {
+    const Sdp* answer(GetAnswer());
+    if (mSdpHelper.IsBundleSlave(*answer, level)) {
+      // We do not add candidate attributes to bundled m-sections unless they
+      // are the "master" bundle m-section.
+      *skipped = true;
+      return NS_OK;
+    }
+  }
+
+  nsresult rv = mSdpHelper.GetMidFromLevel(*sdp, level, mid);
+  if (NS_FAILED(rv)) {
+    return rv;
+  }
+
+  *skipped = false;
+
+  return mSdpHelper.AddCandidateToSdp(sdp, candidate, *mid, level);
+}
+
+// Writes the default candidate address/port (the c=/m= line defaults, plus
+// the rtcp attribute) into the local description at |level|. After
+// negotiation, the same defaults are propagated to any m-sections bundled
+// onto this one. Out-of-range levels are silently ignored.
+nsresult
+JsepSessionImpl::UpdateDefaultCandidate(
+    const std::string& defaultCandidateAddr,
+    uint16_t defaultCandidatePort,
+    const std::string& defaultRtcpCandidateAddr,
+    uint16_t defaultRtcpCandidatePort,
+    uint16_t level)
+{
+  mLastError.clear();
+
+  mozilla::Sdp* sdp = GetParsedLocalDescription();
+
+  if (!sdp) {
+    JSEP_SET_ERROR("Cannot add ICE candidate in state " << GetStateStr(mState));
+    return NS_ERROR_UNEXPECTED;
+  }
+
+  if (level >= sdp->GetMediaSectionCount()) {
+    return NS_OK;
+  }
+
+  std::string defaultRtcpCandidateAddrCopy(defaultRtcpCandidateAddr);
+  if (mState == kJsepStateStable && mTransports[level]->mComponents == 1) {
+    // We know we're doing rtcp-mux by now. Don't create an rtcp attr.
+    defaultRtcpCandidateAddrCopy = "";
+    defaultRtcpCandidatePort = 0;
+  }
+
+  // If offer/answer isn't done, it is too early to tell whether these defaults
+  // need to be applied to other m-sections.
+  SdpHelper::BundledMids bundledMids;
+  if (mState == kJsepStateStable) {
+    nsresult rv = GetNegotiatedBundledMids(&bundledMids);
+    if (NS_FAILED(rv)) {
+      MOZ_ASSERT(false);
+      mLastError += " (This should have been caught sooner!)";
+      return NS_ERROR_FAILURE;
+    }
+  }
+
+  mSdpHelper.SetDefaultAddresses(
+      defaultCandidateAddr,
+      defaultCandidatePort,
+      defaultRtcpCandidateAddrCopy,
+      defaultRtcpCandidatePort,
+      sdp,
+      level,
+      bundledMids);
+
+  return NS_OK;
+}
+
+// Marks ICE gathering as complete (a=end-of-candidates) for the m-section
+// at |level| in the local description; after negotiation this is also
+// applied to m-sections bundled onto it. Out-of-range levels are ignored.
+nsresult
+JsepSessionImpl::EndOfLocalCandidates(uint16_t level)
+{
+  mLastError.clear();
+
+  mozilla::Sdp* sdp = GetParsedLocalDescription();
+
+  if (!sdp) {
+    JSEP_SET_ERROR("Cannot mark end of local ICE candidates in state "
+                   << GetStateStr(mState));
+    return NS_ERROR_UNEXPECTED;
+  }
+
+  if (level >= sdp->GetMediaSectionCount()) {
+    return NS_OK;
+  }
+
+  // If offer/answer isn't done, it is too early to tell whether this update
+  // needs to be applied to other m-sections.
+  SdpHelper::BundledMids bundledMids;
+  if (mState == kJsepStateStable) {
+    nsresult rv = GetNegotiatedBundledMids(&bundledMids);
+    if (NS_FAILED(rv)) {
+      MOZ_ASSERT(false);
+      mLastError += " (This should have been caught sooner!)";
+      return NS_ERROR_FAILURE;
+    }
+  }
+
+  mSdpHelper.SetIceGatheringComplete(sdp,
+                                     level,
+                                     bundledMids);
+
+  return NS_OK;
+}
+
+// Extracts the BUNDLE group mids from the negotiated answer into
+// |bundledMids|. Succeeds with an empty map if there is no answer yet.
+nsresult
+JsepSessionImpl::GetNegotiatedBundledMids(SdpHelper::BundledMids* bundledMids)
+{
+  const Sdp* answerSdp = GetAnswer();
+
+  if (!answerSdp) {
+    return NS_OK;
+  }
+
+  return mSdpHelper.GetBundledMids(*answerSdp, bundledMids);
+}
+
+// (Re)enables a previously-disabled m-section for use in an offer: sets a
+// real port, rtcp-mux where the protocol has RTCP, transport attributes
+// with actpass setup, a recvonly ssrc, extmaps, and a generated mid.
+nsresult
+JsepSessionImpl::EnableOfferMsection(SdpMediaSection* msection)
+{
+  // We assert here because adding rtcp-mux to a non-disabled m-section that
+  // did not already have rtcp-mux can cause problems.
+  MOZ_ASSERT(mSdpHelper.MsectionIsDisabled(*msection));
+
+  msection->SetPort(9);
+
+  // We don't do this in AddTransportAttributes because that is also used for
+  // making answers, and we don't want to unconditionally set rtcp-mux there.
+  if (mSdpHelper.HasRtcp(msection->GetProtocol())) {
+    // Set RTCP-MUX.
+    msection->GetAttributeList().SetAttribute(
+        new SdpFlagAttribute(SdpAttribute::kRtcpMuxAttribute));
+  }
+
+  nsresult rv = AddTransportAttributes(msection, SdpSetupAttribute::kActpass);
+  NS_ENSURE_SUCCESS(rv, rv);
+
+  rv = SetRecvonlySsrc(msection);
+  NS_ENSURE_SUCCESS(rv, rv);
+
+  AddExtmap(msection);
+
+  std::ostringstream osMid;
+  osMid << "sdparta_" << msection->GetLevel();
+  AddMid(osMid.str(), msection);
+
+  return NS_OK;
+}
+
+// Returns the most recent local description: pending if one exists,
+// otherwise current, otherwise nullptr.
+mozilla::Sdp*
+JsepSessionImpl::GetParsedLocalDescription() const
+{
+  if (mPendingLocalDescription) {
+    return mPendingLocalDescription.get();
+  } else if (mCurrentLocalDescription) {
+    return mCurrentLocalDescription.get();
+  }
+
+  return nullptr;
+}
+
+// Returns the most recent remote description: pending if one exists,
+// otherwise current, otherwise nullptr.
+mozilla::Sdp*
+JsepSessionImpl::GetParsedRemoteDescription() const
+{
+  if (mPendingRemoteDescription) {
+    return mPendingRemoteDescription.get();
+  } else if (mCurrentRemoteDescription) {
+    return mCurrentRemoteDescription.get();
+  }
+
+  return nullptr;
+}
+
+// Returns the answer from the last completed offer/answer exchange: the
+// remote description if we offered, otherwise the local one. May be null.
+const Sdp*
+JsepSessionImpl::GetAnswer() const
+{
+  return mWasOffererLastTime ? mCurrentRemoteDescription.get()
+                             : mCurrentLocalDescription.get();
+}
+
+// Moves the session to the terminal closed state. Always succeeds.
+nsresult
+JsepSessionImpl::Close()
+{
+  mLastError.clear();
+  SetState(kJsepStateClosed);
+  return NS_OK;
+}
+
+// Returns a copy of the error message recorded by the last failed call.
+const std::string
+JsepSessionImpl::GetLastError() const
+{
+  return mLastError;
+}
+
+// True when every local track has been bound to an m-line; used to decide
+// whether a renegotiation is needed to place new tracks.
+bool
+JsepSessionImpl::AllLocalTracksAreAssigned() const
+{
+  for (auto i = mLocalTracks.begin(); i != mLocalTracks.end(); ++i) {
+    if (!i->mAssignedMLine.isSome()) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+} // namespace mozilla
diff --git a/media/webrtc/signaling/src/jsep/JsepSessionImpl.h b/media/webrtc/signaling/src/jsep/JsepSessionImpl.h
new file mode 100644
index 000000000..00c07d25e
--- /dev/null
+++ b/media/webrtc/signaling/src/jsep/JsepSessionImpl.h
@@ -0,0 +1,352 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _JSEPSESSIONIMPL_H_
+#define _JSEPSESSIONIMPL_H_
+
+#include <set>
+#include <string>
+#include <vector>
+
+#include "signaling/src/jsep/JsepCodecDescription.h"
+#include "signaling/src/jsep/JsepTrack.h"
+#include "signaling/src/jsep/JsepSession.h"
+#include "signaling/src/jsep/JsepTrack.h"
+#include "signaling/src/sdp/SipccSdpParser.h"
+#include "signaling/src/sdp/SdpHelper.h"
+#include "signaling/src/common/PtrVector.h"
+
+namespace mozilla {
+
+// Abstract UUID source used when minting ids; injected into
+// JsepSessionImpl so tests can substitute a deterministic generator.
+class JsepUuidGenerator
+{
+public:
+  virtual ~JsepUuidGenerator() {}
+  // Writes a fresh id into |id|; returns false on failure.
+  virtual bool Generate(std::string* id) = 0;
+};
+
+// Concrete JsepSession: implements the JSEP offer/answer state machine on
+// top of the sipcc SDP parser, tracking local/remote tracks, transports,
+// and negotiated track pairs.
+class JsepSessionImpl : public JsepSession
+{
+public:
+  // |uuidgen| supplies ids for tracks/streams; ownership is taken.
+  JsepSessionImpl(const std::string& name, UniquePtr<JsepUuidGenerator> uuidgen)
+      : JsepSession(name),
+        mIsOfferer(false),
+        mWasOffererLastTime(false),
+        mIceControlling(false),
+        mRemoteIsIceLite(false),
+        mRemoteIceIsRestarting(false),
+        mBundlePolicy(kBundleBalanced),
+        mSessionId(0),
+        mSessionVersion(0),
+        mUuidGen(Move(uuidgen)),
+        mSdpHelper(&mLastError)
+  {
+  }
+
+  // Implement JsepSession methods.
+  virtual nsresult Init() override;
+
+  virtual nsresult AddTrack(const RefPtr<JsepTrack>& track) override;
+
+  virtual nsresult RemoveTrack(const std::string& streamId,
+                               const std::string& trackId) override;
+
+  virtual nsresult SetIceCredentials(const std::string& ufrag,
+                                     const std::string& pwd) override;
+  virtual const std::string& GetUfrag() const override { return mIceUfrag; }
+  virtual const std::string& GetPwd() const override { return mIcePwd; }
+  nsresult SetBundlePolicy(JsepBundlePolicy policy) override;
+
+  virtual bool
+  RemoteIsIceLite() const override
+  {
+    return mRemoteIsIceLite;
+  }
+
+  virtual bool
+  RemoteIceIsRestarting() const override
+  {
+    return mRemoteIceIsRestarting;
+  }
+
+  virtual std::vector<std::string>
+  GetIceOptions() const override
+  {
+    return mIceOptions;
+  }
+
+  virtual nsresult AddDtlsFingerprint(const std::string& algorithm,
+                                      const std::vector<uint8_t>& value) override;
+
+  // Appends an extmap entry to |extensions| (shared by the audio/video
+  // overloads below).
+  nsresult AddRtpExtension(std::vector<SdpExtmapAttributeList::Extmap>& extensions,
+                           const std::string& extensionName,
+                           SdpDirectionAttribute::Direction direction);
+  virtual nsresult AddAudioRtpExtension(
+      const std::string& extensionName,
+      SdpDirectionAttribute::Direction direction =
+      SdpDirectionAttribute::Direction::kSendrecv) override;
+
+  virtual nsresult AddVideoRtpExtension(
+      const std::string& extensionName,
+      SdpDirectionAttribute::Direction direction =
+      SdpDirectionAttribute::Direction::kSendrecv) override;
+
+  virtual std::vector<JsepCodecDescription*>&
+  Codecs() override
+  {
+    return mSupportedCodecs.values;
+  }
+
+  virtual nsresult ReplaceTrack(const std::string& oldStreamId,
+                                const std::string& oldTrackId,
+                                const std::string& newStreamId,
+                                const std::string& newTrackId) override;
+
+  virtual nsresult SetParameters(
+      const std::string& streamId,
+      const std::string& trackId,
+      const std::vector<JsepTrack::JsConstraints>& constraints) override;
+
+  virtual nsresult GetParameters(
+      const std::string& streamId,
+      const std::string& trackId,
+      std::vector<JsepTrack::JsConstraints>* outConstraints) override;
+
+  virtual std::vector<RefPtr<JsepTrack>> GetLocalTracks() const override;
+
+  virtual std::vector<RefPtr<JsepTrack>> GetRemoteTracks() const override;
+
+  virtual std::vector<RefPtr<JsepTrack>>
+    GetRemoteTracksAdded() const override;
+
+  virtual std::vector<RefPtr<JsepTrack>>
+    GetRemoteTracksRemoved() const override;
+
+  virtual nsresult CreateOffer(const JsepOfferOptions& options,
+                               std::string* offer) override;
+
+  virtual nsresult CreateAnswer(const JsepAnswerOptions& options,
+                                std::string* answer) override;
+
+  virtual std::string GetLocalDescription() const override;
+
+  virtual std::string GetRemoteDescription() const override;
+
+  virtual nsresult SetLocalDescription(JsepSdpType type,
+                                       const std::string& sdp) override;
+
+  virtual nsresult SetRemoteDescription(JsepSdpType type,
+                                        const std::string& sdp) override;
+
+  virtual nsresult AddRemoteIceCandidate(const std::string& candidate,
+                                         const std::string& mid,
+                                         uint16_t level) override;
+
+  virtual nsresult AddLocalIceCandidate(const std::string& candidate,
+                                        uint16_t level,
+                                        std::string* mid,
+                                        bool* skipped) override;
+
+  virtual nsresult UpdateDefaultCandidate(
+      const std::string& defaultCandidateAddr,
+      uint16_t defaultCandidatePort,
+      const std::string& defaultRtcpCandidateAddr,
+      uint16_t defaultRtcpCandidatePort,
+      uint16_t level) override;
+
+  virtual nsresult EndOfLocalCandidates(uint16_t level) override;
+
+  virtual nsresult Close() override;
+
+  virtual const std::string GetLastError() const override;
+
+  virtual bool
+  IsIceControlling() const override
+  {
+    return mIceControlling;
+  }
+
+  virtual bool
+  IsOfferer() const
+  {
+    return mIsOfferer;
+  }
+
+  // Access transports.
+  virtual std::vector<RefPtr<JsepTransport>>
+  GetTransports() const override
+  {
+    return mTransports;
+  }
+
+  virtual std::vector<JsepTrackPair>
+  GetNegotiatedTrackPairs() const override
+  {
+    return mNegotiatedTrackPairs;
+  }
+
+  virtual bool AllLocalTracksAreAssigned() const override;
+
+private:
+  // A DTLS certificate fingerprint advertised in a=fingerprint.
+  struct JsepDtlsFingerprint {
+    std::string mAlgorithm;
+    std::vector<uint8_t> mValue;
+  };
+
+  // A local (sending) track plus the m-line it has been bound to, if any.
+  struct JsepSendingTrack {
+    RefPtr<JsepTrack> mTrack;
+    Maybe<size_t> mAssignedMLine;
+  };
+
+  // A remote (receiving) track plus the m-line it has been bound to, if any.
+  struct JsepReceivingTrack {
+    RefPtr<JsepTrack> mTrack;
+    Maybe<size_t> mAssignedMLine;
+  };
+
+  // Non-const so it can set mLastError
+  nsresult CreateGenericSDP(UniquePtr<Sdp>* sdp);
+  void AddExtmap(SdpMediaSection* msection) const;
+  void AddMid(const std::string& mid, SdpMediaSection* msection) const;
+  const std::vector<SdpExtmapAttributeList::Extmap>* GetRtpExtensions(
+      SdpMediaSection::MediaType type) const;
+
+  void AddCommonExtmaps(const SdpMediaSection& remoteMsection,
+                        SdpMediaSection* msection);
+  nsresult SetupIds();
+  nsresult CreateSsrc(uint32_t* ssrc);
+  void SetupDefaultCodecs();
+  void SetupDefaultRtpExtensions();
+  void SetState(JsepSignalingState state);
+  // Non-const so it can set mLastError
+  nsresult ParseSdp(const std::string& sdp, UniquePtr<Sdp>* parsedp);
+  nsresult SetLocalDescriptionOffer(UniquePtr<Sdp> offer);
+  nsresult SetLocalDescriptionAnswer(JsepSdpType type, UniquePtr<Sdp> answer);
+  nsresult SetRemoteDescriptionOffer(UniquePtr<Sdp> offer);
+  nsresult SetRemoteDescriptionAnswer(JsepSdpType type, UniquePtr<Sdp> answer);
+  nsresult ValidateLocalDescription(const Sdp& description);
+  nsresult ValidateRemoteDescription(const Sdp& description);
+  nsresult ValidateAnswer(const Sdp& offer, const Sdp& answer);
+  nsresult SetRemoteTracksFromDescription(const Sdp* remoteDescription);
+  // Non-const because we use our Uuid generator
+  nsresult CreateReceivingTrack(size_t mline,
+                                const Sdp& sdp,
+                                const SdpMediaSection& msection,
+                                RefPtr<JsepTrack>* track);
+  nsresult HandleNegotiatedSession(const UniquePtr<Sdp>& local,
+                                   const UniquePtr<Sdp>& remote);
+  nsresult AddTransportAttributes(SdpMediaSection* msection,
+                                  SdpSetupAttribute::Role dtlsRole);
+  nsresult CopyPreviousTransportParams(const Sdp& oldAnswer,
+                                       const Sdp& offerersPreviousSdp,
+                                       const Sdp& newOffer,
+                                       Sdp* newLocal);
+  nsresult SetupOfferMSections(const JsepOfferOptions& options, Sdp* sdp);
+  // Non-const so it can assign m-line index to tracks
+  nsresult SetupOfferMSectionsByType(SdpMediaSection::MediaType type,
+                                     Maybe<size_t> offerToReceive,
+                                     Sdp* sdp);
+  nsresult BindLocalTracks(SdpMediaSection::MediaType mediatype,
+                           Sdp* sdp);
+  nsresult BindRemoteTracks(SdpMediaSection::MediaType mediatype,
+                            Sdp* sdp,
+                            size_t* offerToReceive);
+  nsresult SetRecvAsNeededOrDisable(SdpMediaSection::MediaType mediatype,
+                                    Sdp* sdp,
+                                    size_t* offerToRecv);
+  void SetupOfferToReceiveMsection(SdpMediaSection* offer);
+  nsresult AddRecvonlyMsections(SdpMediaSection::MediaType mediatype,
+                                size_t count,
+                                Sdp* sdp);
+  nsresult AddReofferMsections(const Sdp& oldLocalSdp,
+                               const Sdp& oldAnswer,
+                               Sdp* newSdp);
+  void SetupBundle(Sdp* sdp) const;
+  nsresult GetRemoteIds(const Sdp& sdp,
+                        const SdpMediaSection& msection,
+                        std::string* streamId,
+                        std::string* trackId);
+  nsresult CreateOfferMSection(SdpMediaSection::MediaType type,
+                               SdpMediaSection::Protocol proto,
+                               SdpDirectionAttribute::Direction direction,
+                               Sdp* sdp);
+  nsresult GetFreeMsectionForSend(SdpMediaSection::MediaType type,
+                                  Sdp* sdp,
+                                  SdpMediaSection** msection);
+  nsresult CreateAnswerMSection(const JsepAnswerOptions& options,
+                                size_t mlineIndex,
+                                const SdpMediaSection& remoteMsection,
+                                Sdp* sdp);
+  nsresult SetRecvonlySsrc(SdpMediaSection* msection);
+  nsresult BindMatchingLocalTrackToAnswer(SdpMediaSection* msection);
+  nsresult BindMatchingRemoteTrackToAnswer(SdpMediaSection* msection);
+  nsresult DetermineAnswererSetupRole(const SdpMediaSection& remoteMsection,
+                                      SdpSetupAttribute::Role* rolep);
+  nsresult MakeNegotiatedTrackPair(const SdpMediaSection& remote,
+                                   const SdpMediaSection& local,
+                                   const RefPtr<JsepTransport>& transport,
+                                   bool usingBundle,
+                                   size_t transportLevel,
+                                   JsepTrackPair* trackPairOut);
+  void InitTransport(const SdpMediaSection& msection, JsepTransport* transport);
+
+  nsresult FinalizeTransport(const SdpAttributeList& remote,
+                             const SdpAttributeList& answer,
+                             const RefPtr<JsepTransport>& transport);
+
+  nsresult GetNegotiatedBundledMids(SdpHelper::BundledMids* bundledMids);
+
+  nsresult EnableOfferMsection(SdpMediaSection* msection);
+
+  mozilla::Sdp* GetParsedLocalDescription() const;
+  mozilla::Sdp* GetParsedRemoteDescription() const;
+  const Sdp* GetAnswer() const;
+
+  std::vector<JsepSendingTrack> mLocalTracks;
+  std::vector<JsepReceivingTrack> mRemoteTracks;
+  // By the most recent SetRemoteDescription
+  std::vector<JsepReceivingTrack> mRemoteTracksAdded;
+  std::vector<JsepReceivingTrack> mRemoteTracksRemoved;
+  std::vector<RefPtr<JsepTransport> > mTransports;
+  // So we can rollback
+  std::vector<RefPtr<JsepTransport> > mOldTransports;
+  std::vector<JsepTrackPair> mNegotiatedTrackPairs;
+
+  bool mIsOfferer;
+  bool mWasOffererLastTime;
+  bool mIceControlling;
+  std::string mIceUfrag;
+  std::string mIcePwd;
+  bool mRemoteIsIceLite;
+  bool mRemoteIceIsRestarting;
+  std::vector<std::string> mIceOptions;
+  JsepBundlePolicy mBundlePolicy;
+  std::vector<JsepDtlsFingerprint> mDtlsFingerprints;
+  uint64_t mSessionId;
+  uint64_t mSessionVersion;
+  std::vector<SdpExtmapAttributeList::Extmap> mAudioRtpExtensions;
+  std::vector<SdpExtmapAttributeList::Extmap> mVideoRtpExtensions;
+  UniquePtr<JsepUuidGenerator> mUuidGen;
+  std::string mDefaultRemoteStreamId;
+  std::map<size_t, std::string> mDefaultRemoteTrackIdsByLevel;
+  std::string mCNAME;
+  // Used to prevent duplicate local SSRCs. Not used to prevent local/remote or
+  // remote-only duplication, which will be important for EKT but not now.
+  std::set<uint32_t> mSsrcs;
+  // When an m-section doesn't have a local track, it still needs an ssrc, which
+  // is stored here.
+  std::vector<uint32_t> mRecvonlySsrcs;
+  UniquePtr<Sdp> mGeneratedLocalDescription; // Created but not set.
+  UniquePtr<Sdp> mCurrentLocalDescription;
+  UniquePtr<Sdp> mCurrentRemoteDescription;
+  UniquePtr<Sdp> mPendingLocalDescription;
+  UniquePtr<Sdp> mPendingRemoteDescription;
+  PtrVector<JsepCodecDescription> mSupportedCodecs;
+  std::string mLastError;
+  SipccSdpParser mParser;
+  SdpHelper mSdpHelper;
+};
+
+} // namespace mozilla
+
+#endif
diff --git a/media/webrtc/signaling/src/jsep/JsepTrack.cpp b/media/webrtc/signaling/src/jsep/JsepTrack.cpp
new file mode 100644
index 000000000..1b045d8ec
--- /dev/null
+++ b/media/webrtc/signaling/src/jsep/JsepTrack.cpp
@@ -0,0 +1,531 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "signaling/src/jsep/JsepTrack.h"
+#include "signaling/src/jsep/JsepCodecDescription.h"
+#include "signaling/src/jsep/JsepTrackEncoding.h"
+
+#include <algorithm>
+
+namespace mozilla
+{
+// Collects the payload types of every negotiated codec across all
+// encodings into |payloadTypes|, sorted and de-duplicated. No-op if
+// negotiation has not happened yet.
+void
+JsepTrack::GetNegotiatedPayloadTypes(std::vector<uint16_t>* payloadTypes)
+{
+  if (!mNegotiatedDetails) {
+    return;
+  }
+
+  for (const auto* encoding : mNegotiatedDetails->mEncodings.values) {
+    GetPayloadTypes(encoding->GetCodecs(), payloadTypes);
+  }
+
+  // Prune out dupes
+  std::sort(payloadTypes->begin(), payloadTypes->end());
+  auto newEnd = std::unique(payloadTypes->begin(), payloadTypes->end());
+  payloadTypes->erase(newEnd, payloadTypes->end());
+}
+
+/* static */
+// Appends the numeric payload type of each codec in |codecs| to
+// |payloadTypes|; codecs whose pt string fails to parse are skipped
+// (debug-asserted, since that indicates a bug upstream).
+void
+JsepTrack::GetPayloadTypes(
+    const std::vector<JsepCodecDescription*>& codecs,
+    std::vector<uint16_t>* payloadTypes)
+{
+  for (JsepCodecDescription* codec : codecs) {
+    uint16_t pt;
+    if (!codec->GetPtAsInt(&pt)) {
+      MOZ_ASSERT(false);
+      continue;
+    }
+    payloadTypes->push_back(pt);
+  }
+}
+
+// Resolves payload-type collisions among enabled codecs: the first codec
+// to claim a pt keeps it; later ones are reassigned a free pt from the
+// dynamic range 96-127, or left disabled if none remains.
+void
+JsepTrack::EnsureNoDuplicatePayloadTypes(
+    std::vector<JsepCodecDescription*>* codecs)
+{
+  std::set<uint16_t> uniquePayloadTypes;
+
+  for (JsepCodecDescription* codec : *codecs) {
+    // We assume there are no dupes in negotiated codecs; unnegotiated codecs
+    // need to change if there is a clash.
+    if (!codec->mEnabled) {
+      continue;
+    }
+
+    // Disable, and only re-enable if we can ensure it has a unique pt.
+    codec->mEnabled = false;
+
+    uint16_t currentPt;
+    if (!codec->GetPtAsInt(&currentPt)) {
+      MOZ_ASSERT(false);
+      continue;
+    }
+
+    if (!uniquePayloadTypes.count(currentPt)) {
+      codec->mEnabled = true;
+      uniquePayloadTypes.insert(currentPt);
+      continue;
+    }
+
+    // |codec| cannot use its current payload type. Try to find another.
+    for (uint16_t freePt = 96; freePt <= 127; ++freePt) {
+      // Not super efficient, but readability is probably more important.
+      if (!uniquePayloadTypes.count(freePt)) {
+        uniquePayloadTypes.insert(freePt);
+        codec->mEnabled = true;
+        std::ostringstream os;
+        os << freePt;
+        codec->mDefaultPt = os.str();
+        break;
+      }
+    }
+  }
+}
+
+// Clones every prototype codec matching this track's media type into
+// mPrototypeCodecs, stamps each clone with the track's direction, and
+// resolves any payload-type clashes among the clones.
+void
+JsepTrack::PopulateCodecs(const std::vector<JsepCodecDescription*>& prototype)
+{
+  for (const JsepCodecDescription* prototypeCodec : prototype) {
+    if (prototypeCodec->mType == mType) {
+      mPrototypeCodecs.values.push_back(prototypeCodec->Clone());
+      mPrototypeCodecs.values.back()->mDirection = mDirection;
+    }
+  }
+
+  EnsureNoDuplicatePayloadTypes(&mPrototypeCodecs.values);
+}
+
+// Writes this track's codecs (and, for send tracks, its simulcast/rid
+// constraints) into an offer m-section.
+void
+JsepTrack::AddToOffer(SdpMediaSection* offer) const
+{
+  AddToMsection(mPrototypeCodecs.values, offer);
+  if (mDirection == sdp::kSend) {
+    AddToMsection(mJsEncodeConstraints, sdp::kSend, offer);
+  }
+}
+
+// Writes this track into an answer m-section, using only codecs that
+// successfully negotiate against |offer|; bails out (adding nothing) if
+// no codec matches. For send tracks, also answers the offerer's rids.
+void
+JsepTrack::AddToAnswer(const SdpMediaSection& offer,
+                       SdpMediaSection* answer) const
+{
+  // We do not modify mPrototypeCodecs here, since we're only creating an
+  // answer. Once offer/answer concludes, we will update mPrototypeCodecs.
+  PtrVector<JsepCodecDescription> codecs;
+  codecs.values = GetCodecClones();
+  NegotiateCodecs(offer, &codecs.values);
+  if (codecs.values.empty()) {
+    return;
+  }
+
+  AddToMsection(codecs.values, answer);
+
+  if (mDirection == sdp::kSend) {
+    std::vector<JsConstraints> constraints;
+    std::vector<SdpRidAttributeList::Rid> rids;
+    GetRids(offer, sdp::kRecv, &rids);
+    NegotiateRids(rids, &constraints);
+    AddToMsection(constraints, sdp::kSend, answer);
+  }
+}
+
+// Adds each codec's format/rtpmap/fmtp lines to |msection| and marks the
+// section sending or receiving per this track's direction. Send tracks
+// (except datachannel) also contribute their ssrcs and msid.
+void
+JsepTrack::AddToMsection(const std::vector<JsepCodecDescription*>& codecs,
+                         SdpMediaSection* msection) const
+{
+  MOZ_ASSERT(msection->GetMediaType() == mType);
+  MOZ_ASSERT(!codecs.empty());
+
+  for (const JsepCodecDescription* codec : codecs) {
+    codec->AddToMediaSection(*msection);
+  }
+
+  if (mDirection == sdp::kSend) {
+    if (msection->GetMediaType() != SdpMediaSection::kApplication) {
+      msection->SetSsrcs(mSsrcs, mCNAME);
+      msection->AddMsid(mStreamId, mTrackId);
+    }
+    msection->SetSending(true);
+  } else {
+    msection->SetReceiving(true);
+  }
+}
+
+// Updates the |id| values in |constraintsList| with the rid values in |rids|,
+// where necessary: each rid with no matching constraints adopts the first
+// constraints entry that still has an empty id.
+void
+JsepTrack::NegotiateRids(const std::vector<SdpRidAttributeList::Rid>& rids,
+                         std::vector<JsConstraints>* constraintsList) const
+{
+  for (const SdpRidAttributeList::Rid& rid : rids) {
+    if (!FindConstraints(rid.id, *constraintsList)) {
+      // Pair up the first JsConstraints with an empty id, if it exists.
+      JsConstraints* constraints = FindConstraints("", *constraintsList);
+      if (constraints) {
+        constraints->rid = rid.id;
+      }
+    }
+  }
+}
+
+/* static */
+// Emits a=rid and a=simulcast attributes for every constraints entry that
+// carries a rid, in the given direction. Adds nothing when no rids exist.
+void
+JsepTrack::AddToMsection(const std::vector<JsConstraints>& constraintsList,
+                         sdp::Direction direction,
+                         SdpMediaSection* msection)
+{
+  UniquePtr<SdpSimulcastAttribute> simulcast(new SdpSimulcastAttribute);
+  UniquePtr<SdpRidAttributeList> rids(new SdpRidAttributeList);
+  for (const JsConstraints& constraints : constraintsList) {
+    if (!constraints.rid.empty()) {
+      SdpRidAttributeList::Rid rid;
+      rid.id = constraints.rid;
+      rid.direction = direction;
+      rids->mRids.push_back(rid);
+
+      SdpSimulcastAttribute::Version version;
+      version.choices.push_back(constraints.rid);
+      if (direction == sdp::kSend) {
+        simulcast->sendVersions.push_back(version);
+      } else {
+        simulcast->recvVersions.push_back(version);
+      }
+    }
+  }
+
+  if (!rids->mRids.empty()) {
+    msection->GetAttributeList().SetAttribute(simulcast.release());
+    msection->GetAttributeList().SetAttribute(rids.release());
+  }
+}
+
+// Extracts the rids referenced by |msection|'s a=simulcast attribute for
+// the given direction into |rids| (cleared first). Only rid-based
+// simulcast is supported; PT-based versions are ignored.
+void
+JsepTrack::GetRids(const SdpMediaSection& msection,
+                   sdp::Direction direction,
+                   std::vector<SdpRidAttributeList::Rid>* rids) const
+{
+  rids->clear();
+  if (!msection.GetAttributeList().HasAttribute(
+        SdpAttribute::kSimulcastAttribute)) {
+    return;
+  }
+
+  const SdpSimulcastAttribute& simulcast(
+      msection.GetAttributeList().GetSimulcast());
+
+  const SdpSimulcastAttribute::Versions* versions = nullptr;
+  switch (direction) {
+    case sdp::kSend:
+      versions = &simulcast.sendVersions;
+      break;
+    case sdp::kRecv:
+      versions = &simulcast.recvVersions;
+      break;
+  }
+
+  if (!versions->IsSet()) {
+    return;
+  }
+
+  if (versions->type != SdpSimulcastAttribute::Versions::kRid) {
+    // No support for PT-based simulcast, yet.
+    return;
+  }
+
+  for (const SdpSimulcastAttribute::Version& version : *versions) {
+    if (!version.choices.empty()) {
+      // We validate that rids are present (and sane) elsewhere.
+      rids->push_back(*msection.FindRid(version.choices[0]));
+    }
+  }
+}
+
+// Returns the constraints entry whose rid equals |id|, or nullptr if none.
+JsepTrack::JsConstraints*
+JsepTrack::FindConstraints(const std::string& id,
+                           std::vector<JsConstraints>& constraintsList) const
+{
+  for (JsConstraints& constraints : constraintsList) {
+    if (constraints.rid == id) {
+      return &constraints;
+    }
+  }
+  return nullptr;
+}
+
+// Builds one JsepTrackEncoding per rid the remote wants to receive (or a
+// single unicast encoding when there are none), attaching the negotiated
+// codecs, JS constraints, and remote bandwidth limit to each.
+void
+JsepTrack::CreateEncodings(
+    const SdpMediaSection& remote,
+    const std::vector<JsepCodecDescription*>& negotiatedCodecs,
+    JsepTrackNegotiatedDetails* negotiatedDetails)
+{
+  std::vector<SdpRidAttributeList::Rid> rids;
+  GetRids(remote, sdp::kRecv, &rids); // Get rids we will send
+  NegotiateRids(rids, &mJsEncodeConstraints);
+  if (rids.empty()) {
+    // Add dummy value with an empty id to make sure we get a single unicast
+    // stream.
+    rids.push_back(SdpRidAttributeList::Rid());
+  }
+
+  // For each rid in the remote, make sure we have an encoding, and configure
+  // that encoding appropriately.
+  for (size_t i = 0; i < rids.size(); ++i) {
+    if (i == negotiatedDetails->mEncodings.values.size()) {
+      negotiatedDetails->mEncodings.values.push_back(new JsepTrackEncoding);
+    }
+
+    JsepTrackEncoding* encoding = negotiatedDetails->mEncodings.values[i];
+
+    for (const JsepCodecDescription* codec : negotiatedCodecs) {
+      if (rids[i].HasFormat(codec->mDefaultPt)) {
+        encoding->AddCodec(*codec);
+      }
+    }
+
+    encoding->mRid = rids[i].id;
+    // If we end up supporting params for rid, we would handle that here.
+
+    // Incorporate the corresponding JS encoding constraints, if they exist
+    for (const JsConstraints& jsConstraints : mJsEncodeConstraints) {
+      if (jsConstraints.rid == rids[i].id) {
+        encoding->mConstraints = jsConstraints.constraints;
+      }
+    }
+
+    encoding->UpdateMaxBitrate(remote);
+  }
+}
+
+// Returns freshly-allocated clones of the prototype codecs; the caller
+// takes ownership (typically via PtrVector).
+std::vector<JsepCodecDescription*>
+JsepTrack::GetCodecClones() const
+{
+  std::vector<JsepCodecDescription*> clones;
+  for (const JsepCodecDescription* codec : mPrototypeCodecs.values) {
+    clones.push_back(codec->Clone());
+  }
+  return clones;
+}
+
+// Strict-weak ordering that floats strongly-preferred codecs ahead of the
+// rest; used with stable_sort so remote preference order is otherwise kept.
+static bool
+CompareCodec(const JsepCodecDescription* lhs, const JsepCodecDescription* rhs)
+{
+  return lhs->mStronglyPreferred && !rhs->mStronglyPreferred;
+}
+
+// Negotiates |codecs| (which this function takes over and rewrites)
+// against the remote m-section's format list, in the remote's preference
+// order. Optionally reports pt changes via |formatChanges|. Also applies
+// cross-codec features: prunes RED redundant encodings to negotiated pts,
+// enables FEC on video codecs when both red and ulpfec survive, and
+// enables DTMF on audio codecs when telephone-event survives. Finally,
+// unless red is present, trims the answer to one codec (plus dtmf).
+void
+JsepTrack::NegotiateCodecs(
+    const SdpMediaSection& remote,
+    std::vector<JsepCodecDescription*>* codecs,
+    std::map<std::string, std::string>* formatChanges) const
+{
+  PtrVector<JsepCodecDescription> unnegotiatedCodecs;
+  std::swap(unnegotiatedCodecs.values, *codecs);
+
+  // Outer loop establishes the remote side's preference
+  for (const std::string& fmt : remote.GetFormats()) {
+    for (size_t i = 0; i < unnegotiatedCodecs.values.size(); ++i) {
+      JsepCodecDescription* codec = unnegotiatedCodecs.values[i];
+      if (!codec || !codec->mEnabled || !codec->Matches(fmt, remote)) {
+        continue;
+      }
+
+      std::string originalFormat = codec->mDefaultPt;
+      if(codec->Negotiate(fmt, remote)) {
+        codecs->push_back(codec);
+        unnegotiatedCodecs.values[i] = nullptr;
+        if (formatChanges) {
+          (*formatChanges)[originalFormat] = codec->mDefaultPt;
+        }
+        break;
+      }
+    }
+  }
+
+  // Find the (potential) red codec and ulpfec codec or telephone-event
+  JsepVideoCodecDescription* red = nullptr;
+  JsepVideoCodecDescription* ulpfec = nullptr;
+  JsepAudioCodecDescription* dtmf = nullptr;
+  // We can safely cast here since JsepTrack has a MediaType and only codecs
+  // that match that MediaType (kAudio or kVideo) are added.
+  for (auto codec : *codecs) {
+    if (codec->mName == "red") {
+      red = static_cast<JsepVideoCodecDescription*>(codec);
+    }
+    else if (codec->mName == "ulpfec") {
+      ulpfec = static_cast<JsepVideoCodecDescription*>(codec);
+    }
+    else if (codec->mName == "telephone-event") {
+      dtmf = static_cast<JsepAudioCodecDescription*>(codec);
+    }
+  }
+  // if we have a red codec remove redundant encodings that don't exist
+  if (red) {
+    // Since we could have an externally specified redundant encodings
+    // list, we shouldn't simply rebuild the redundant encodings list
+    // based on the current list of codecs.
+    std::vector<uint8_t> unnegotiatedEncodings;
+    std::swap(unnegotiatedEncodings, red->mRedundantEncodings);
+    for (auto redundantPt : unnegotiatedEncodings) {
+      std::string pt = std::to_string(redundantPt);
+      for (auto codec : *codecs) {
+        if (pt == codec->mDefaultPt) {
+          red->mRedundantEncodings.push_back(redundantPt);
+          break;
+        }
+      }
+    }
+  }
+  // Video FEC is indicated by the existence of the red and ulpfec
+  // codecs and not an attribute on the particular video codec (like in
+  // a rtcpfb attr). If we see both red and ulpfec codecs, we enable FEC
+  // on all the other codecs.
+  if (red && ulpfec) {
+    for (auto codec : *codecs) {
+      if (codec->mName != "red" && codec->mName != "ulpfec") {
+        JsepVideoCodecDescription* videoCodec =
+            static_cast<JsepVideoCodecDescription*>(codec);
+        videoCodec->EnableFec();
+      }
+    }
+  }
+
+  // Dtmf support is indicated by the existence of the telephone-event
+  // codec, and not an attribute on the particular audio codec (like in a
+  // rtcpfb attr). If we see the telephone-event codec, we enable dtmf
+  // support on all the other audio codecs.
+  if (dtmf) {
+    for (auto codec : *codecs) {
+      JsepAudioCodecDescription* audioCodec =
+          static_cast<JsepAudioCodecDescription*>(codec);
+      audioCodec->mDtmfEnabled = true;
+    }
+  }
+
+  // Make sure strongly preferred codecs are up front, overriding the remote
+  // side's preference.
+  std::stable_sort(codecs->begin(), codecs->end(), CompareCodec);
+
+  // TODO(bug 814227): Remove this once we're ready to put multiple codecs in an
+  // answer. For now, remove all but the first codec unless the red codec
+  // exists, and then we include the others per RFC 5109, section 14.2.
+  // Note: now allows keeping the telephone-event codec, if it appears, as the
+  // last codec in the list.
+  if (!codecs->empty() && !red) {
+    int newSize = dtmf ? 2 : 1;
+    for (size_t i = 1; i < codecs->size(); ++i) {
+      if (!dtmf || dtmf != (*codecs)[i]) {
+        delete (*codecs)[i];
+        (*codecs)[i] = nullptr;
+      }
+    }
+    if (dtmf) {
+      (*codecs)[newSize-1] = dtmf;
+    }
+    codecs->resize(newSize);
+  }
+}
+
+// Finalizes offer/answer for this track: negotiates codecs against the
+// remote m-section, folds any payload-type changes back into the
+// prototype codecs (moving negotiated codecs to the front), then builds
+// the negotiated details (encodings, extmaps, and — for recv tracks —
+// the remote's ssrcs).
+void
+JsepTrack::Negotiate(const SdpMediaSection& answer,
+                     const SdpMediaSection& remote)
+{
+  PtrVector<JsepCodecDescription> negotiatedCodecs;
+  negotiatedCodecs.values = GetCodecClones();
+
+  std::map<std::string, std::string> formatChanges;
+  NegotiateCodecs(remote,
+                  &negotiatedCodecs.values,
+                  &formatChanges);
+
+  // Use |formatChanges| to update mPrototypeCodecs
+  size_t insertPos = 0;
+  for (size_t i = 0; i < mPrototypeCodecs.values.size(); ++i) {
+    if (formatChanges.count(mPrototypeCodecs.values[i]->mDefaultPt)) {
+      // Update the payload type to what was negotiated
+      mPrototypeCodecs.values[i]->mDefaultPt =
+        formatChanges[mPrototypeCodecs.values[i]->mDefaultPt];
+      // Move this negotiated codec up front
+      std::swap(mPrototypeCodecs.values[insertPos],
+                mPrototypeCodecs.values[i]);
+      ++insertPos;
+    }
+  }
+
+  EnsureNoDuplicatePayloadTypes(&mPrototypeCodecs.values);
+
+  UniquePtr<JsepTrackNegotiatedDetails> negotiatedDetails =
+      MakeUnique<JsepTrackNegotiatedDetails>();
+
+  CreateEncodings(remote, negotiatedCodecs.values, negotiatedDetails.get());
+
+  if (answer.GetAttributeList().HasAttribute(SdpAttribute::kExtmapAttribute)) {
+    for (auto& extmapAttr : answer.GetAttributeList().GetExtmap().mExtmaps) {
+      negotiatedDetails->mExtmap[extmapAttr.extensionname] = extmapAttr;
+    }
+  }
+
+  if (mDirection == sdp::kRecv) {
+    mSsrcs.clear();
+    if (remote.GetAttributeList().HasAttribute(SdpAttribute::kSsrcAttribute)) {
+      for (auto& ssrcAttr : remote.GetAttributeList().GetSsrc().mSsrcs) {
+        AddSsrc(ssrcAttr.ssrc);
+      }
+    }
+  }
+
+  mNegotiatedDetails = Move(negotiatedDetails);
+}
+
+// When doing bundle, if all else fails we can try to figure out which m-line a
+// given RTP packet belongs to by looking at the payload type field. This only
+// works, however, if that payload type appeared in only one m-section.
+// We figure that out here.
+/* static */
+void
+JsepTrack::SetUniquePayloadTypes(const std::vector<RefPtr<JsepTrack>>& tracks)
+{
+  // Maps to track details if no other track contains the payload type,
+  // otherwise maps to nullptr.
+  std::map<uint16_t, JsepTrackNegotiatedDetails*> payloadTypeToDetailsMap;
+
+  for (const RefPtr<JsepTrack>& track : tracks) {
+    if (track->GetMediaType() == SdpMediaSection::kApplication) {
+      // Datachannel tracks carry no RTP payload types.
+      continue;
+    }
+
+    auto* details = track->GetNegotiatedDetails();
+    if (!details) {
+      // Can happen if negotiation fails on a track
+      continue;
+    }
+
+    std::vector<uint16_t> payloadTypesForTrack;
+    track->GetNegotiatedPayloadTypes(&payloadTypesForTrack);
+
+    for (uint16_t pt : payloadTypesForTrack) {
+      if (payloadTypeToDetailsMap.count(pt)) {
+        // Found in more than one track, not unique
+        payloadTypeToDetailsMap[pt] = nullptr;
+      } else {
+        payloadTypeToDetailsMap[pt] = details;
+      }
+    }
+  }
+
+  // Record each still-unique pt on its owning track's details.
+  for (auto ptAndDetails : payloadTypeToDetailsMap) {
+    uint16_t uniquePt = ptAndDetails.first;
+    MOZ_ASSERT(uniquePt <= UINT8_MAX);
+    auto trackDetails = ptAndDetails.second;
+
+    if (trackDetails) {
+      trackDetails->mUniquePayloadTypes.push_back(
+          static_cast<uint8_t>(uniquePt));
+    }
+  }
+}
+
+} // namespace mozilla
+
diff --git a/media/webrtc/signaling/src/jsep/JsepTrack.h b/media/webrtc/signaling/src/jsep/JsepTrack.h
new file mode 100644
index 000000000..5aa37404f
--- /dev/null
+++ b/media/webrtc/signaling/src/jsep/JsepTrack.h
@@ -0,0 +1,292 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _JSEPTRACK_H_
+#define _JSEPTRACK_H_
+
+#include <algorithm>
+#include <string>
+#include <map>
+#include <set>
+
+#include <mozilla/RefPtr.h>
+#include <mozilla/UniquePtr.h>
+#include <mozilla/Maybe.h>
+#include "nsISupportsImpl.h"
+#include "nsError.h"
+
+#include "signaling/src/jsep/JsepTransport.h"
+#include "signaling/src/jsep/JsepTrackEncoding.h"
+#include "signaling/src/sdp/Sdp.h"
+#include "signaling/src/sdp/SdpAttribute.h"
+#include "signaling/src/sdp/SdpMediaSection.h"
+#include "signaling/src/common/PtrVector.h"
+
+namespace mozilla {
+
// Result of negotiating a single track: the negotiated encodings, the
// extmap (RTP header extension) mappings keyed by extension name, and any
// payload types unique to this track (used as a bundle demux fallback; see
// JsepTrack::SetUniquePayloadTypes).
class JsepTrackNegotiatedDetails
{
public:
  // Number of negotiated encodings (more than one implies simulcast).
  size_t
  GetEncodingCount() const
  {
    return mEncodings.values.size();
  }

  // Returns the encoding at |index|; release-asserts on out-of-range.
  const JsepTrackEncoding&
  GetEncoding(size_t index) const
  {
    MOZ_RELEASE_ASSERT(index < mEncodings.values.size());
    return *mEncodings.values[index];
  }

  // Looks up a negotiated RTP header extension by name; returns nullptr if
  // that extension was not negotiated.
  const SdpExtmapAttributeList::Extmap*
  GetExt(const std::string& ext_name) const
  {
    auto it = mExtmap.find(ext_name);
    if (it != mExtmap.end()) {
      return &it->second;
    }
    return nullptr;
  }

  // Payload types that appear in no other track. Returned by value (a copy).
  std::vector<uint8_t> GetUniquePayloadTypes() const
  {
    return mUniquePayloadTypes;
  }

private:
  // JsepTrack fills these members in during negotiation.
  friend class JsepTrack;

  std::map<std::string, SdpExtmapAttributeList::Extmap> mExtmap; // by ext name
  std::vector<uint8_t> mUniquePayloadTypes;
  PtrVector<JsepTrackEncoding> mEncodings; // owned
};
+
// A single JSEP-negotiated media track. Tracks the stream/track ids,
// direction, candidate codecs, JS-supplied encoding constraints, SSRCs, and
// (after negotiation) a JsepTrackNegotiatedDetails.
class JsepTrack
{
public:
  JsepTrack(mozilla::SdpMediaSection::MediaType type,
            const std::string& streamid,
            const std::string& trackid,
            sdp::Direction direction = sdp::kSend)
      : mType(type),
        mStreamId(streamid),
        mTrackId(trackid),
        mDirection(direction),
        mActive(false)
  {}

  virtual mozilla::SdpMediaSection::MediaType
  GetMediaType() const
  {
    return mType;
  }

  virtual const std::string&
  GetStreamId() const
  {
    return mStreamId;
  }

  virtual void
  SetStreamId(const std::string& id)
  {
    mStreamId = id;
  }

  virtual const std::string&
  GetTrackId() const
  {
    return mTrackId;
  }

  virtual void
  SetTrackId(const std::string& id)
  {
    mTrackId = id;
  }

  virtual const std::string&
  GetCNAME() const
  {
    return mCNAME;
  }

  virtual void
  SetCNAME(const std::string& cname)
  {
    mCNAME = cname;
  }

  virtual sdp::Direction
  GetDirection() const
  {
    return mDirection;
  }

  virtual const std::vector<uint32_t>&
  GetSsrcs() const
  {
    return mSsrcs;
  }

  virtual void
  AddSsrc(uint32_t ssrc)
  {
    mSsrcs.push_back(ssrc);
  }

  bool
  GetActive() const
  {
    return mActive;
  }

  void
  SetActive(bool active)
  {
    mActive = active;
  }

  // Initializes this track's candidate codecs from |prototype|
  // (implementation in JsepTrack.cpp).
  virtual void PopulateCodecs(
      const std::vector<JsepCodecDescription*>& prototype);

  // Applies |func| to each prototype codec.
  template <class UnaryFunction>
  void ForEachCodec(UnaryFunction func)
  {
    std::for_each(mPrototypeCodecs.values.begin(),
                  mPrototypeCodecs.values.end(), func);
  }

  // Stable-sorts the prototype codecs with |sorter| (e.g. by preference).
  template <class BinaryPredicate>
  void SortCodecs(BinaryPredicate sorter)
  {
    std::stable_sort(mPrototypeCodecs.values.begin(),
                     mPrototypeCodecs.values.end(), sorter);
  }

  virtual void AddToOffer(SdpMediaSection* offer) const;
  virtual void AddToAnswer(const SdpMediaSection& offer,
                           SdpMediaSection* answer) const;
  virtual void Negotiate(const SdpMediaSection& answer,
                         const SdpMediaSection& remote);
  static void SetUniquePayloadTypes(
      const std::vector<RefPtr<JsepTrack>>& tracks);
  virtual void GetNegotiatedPayloadTypes(std::vector<uint16_t>* payloadTypes);

  // This will be set when negotiation is carried out; nullptr before then.
  virtual const JsepTrackNegotiatedDetails*
  GetNegotiatedDetails() const
  {
    if (mNegotiatedDetails) {
      return mNegotiatedDetails.get();
    }
    return nullptr;
  }

  virtual JsepTrackNegotiatedDetails*
  GetNegotiatedDetails()
  {
    if (mNegotiatedDetails) {
      return mNegotiatedDetails.get();
    }
    return nullptr;
  }

  virtual void
  ClearNegotiatedDetails()
  {
    mNegotiatedDetails.reset();
  }

  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(JsepTrack);

  // Encoding parameters/constraints supplied from JS, optionally tagged
  // with a rid (for simulcast).
  struct JsConstraints
  {
    std::string rid;
    EncodingConstraints constraints;
  };

  void SetJsConstraints(const std::vector<JsConstraints>& constraintsList)
  {
    mJsEncodeConstraints = constraintsList;
  }

  // Copies the stored constraints into |outConstraintsList| (must be
  // non-null).
  void GetJsConstraints(std::vector<JsConstraints>* outConstraintsList) const
  {
    MOZ_ASSERT(outConstraintsList);
    *outConstraintsList = mJsEncodeConstraints;
  }

  static void AddToMsection(const std::vector<JsConstraints>& constraintsList,
                            sdp::Direction direction,
                            SdpMediaSection* msection);

protected:
  // Refcounted (NS_INLINE_DECL_THREADSAFE_REFCOUNTING); destroyed via
  // Release(), hence the protected destructor.
  virtual ~JsepTrack() {}

private:
  std::vector<JsepCodecDescription*> GetCodecClones() const;
  static void EnsureNoDuplicatePayloadTypes(
      std::vector<JsepCodecDescription*>* codecs);
  static void GetPayloadTypes(
      const std::vector<JsepCodecDescription*>& codecs,
      std::vector<uint16_t>* pts);
  static void EnsurePayloadTypeIsUnique(std::set<uint16_t>* uniquePayloadTypes,
                                        JsepCodecDescription* codec);
  void AddToMsection(const std::vector<JsepCodecDescription*>& codecs,
                     SdpMediaSection* msection) const;
  void GetRids(const SdpMediaSection& msection,
               sdp::Direction direction,
               std::vector<SdpRidAttributeList::Rid>* rids) const;
  void CreateEncodings(
      const SdpMediaSection& remote,
      const std::vector<JsepCodecDescription*>& negotiatedCodecs,
      JsepTrackNegotiatedDetails* details);

  // |formatChanges| is set on completion of offer/answer, and records how the
  // formats in |codecs| were changed, which is used by |Negotiate| to update
  // |mPrototypeCodecs|.
  virtual void NegotiateCodecs(
      const SdpMediaSection& remote,
      std::vector<JsepCodecDescription*>* codecs,
      std::map<std::string, std::string>* formatChanges = nullptr) const;

  JsConstraints* FindConstraints(
      const std::string& rid,
      std::vector<JsConstraints>& constraintsList) const;
  void NegotiateRids(const std::vector<SdpRidAttributeList::Rid>& rids,
                     std::vector<JsConstraints>* constraints) const;

  const mozilla::SdpMediaSection::MediaType mType;
  std::string mStreamId;
  std::string mTrackId;
  std::string mCNAME;
  const sdp::Direction mDirection;
  PtrVector<JsepCodecDescription> mPrototypeCodecs; // owned codec templates
  // Holds encoding params/constraints from JS. Simulcast happens when there are
  // multiple of these. If there are none, we assume unconstrained unicast with
  // no rid.
  std::vector<JsConstraints> mJsEncodeConstraints;
  UniquePtr<JsepTrackNegotiatedDetails> mNegotiatedDetails;
  std::vector<uint32_t> mSsrcs;
  bool mActive;
};
+
// A matched pair of send/receive tracks at one m-line level, plus the
// transports they run over. Need a better name for this.
struct JsepTrackPair {
  size_t mLevel;
  // Is this track pair sharing a transport with another?
  Maybe<size_t> mBundleLevel;
  uint32_t mRecvonlySsrc;
  RefPtr<JsepTrack> mSending;
  RefPtr<JsepTrack> mReceiving;
  RefPtr<JsepTransport> mRtpTransport;
  RefPtr<JsepTransport> mRtcpTransport;
};
+
+} // namespace mozilla
+
+#endif
diff --git a/media/webrtc/signaling/src/jsep/JsepTrackEncoding.h b/media/webrtc/signaling/src/jsep/JsepTrackEncoding.h
new file mode 100644
index 000000000..b59b672e4
--- /dev/null
+++ b/media/webrtc/signaling/src/jsep/JsepTrackEncoding.h
@@ -0,0 +1,60 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _JESPTRACKENCODING_H_
+#define _JESPTRACKENCODING_H_
+
+#include "signaling/src/jsep/JsepCodecDescription.h"
+#include "signaling/src/common/EncodingConstraints.h"
+#include "signaling/src/common/PtrVector.h"
+
+namespace mozilla {
// Represents a single encoding of a media track. When simulcast is used, there
// may be multiple. Each encoding may have some constraints (imposed by JS), and
// may be able to use any one of multiple codecs (JsepCodecDescription) at any
// given time.
class JsepTrackEncoding
{
public:
  // Codecs usable by this encoding; the pointers are owned by mCodecs.
  const std::vector<JsepCodecDescription*>& GetCodecs() const
  {
    return mCodecs.values;
  }

  // Stores an owned clone of |codec|.
  void AddCodec(const JsepCodecDescription& codec)
  {
    mCodecs.values.push_back(codec.Clone());
  }

  // True if any codec in this encoding has |format| as its default payload
  // type string.
  bool HasFormat(const std::string& format) const
  {
    for (const JsepCodecDescription* codec : mCodecs.values) {
      if (codec->mDefaultPt == format) {
        return true;
      }
    }
    return false;
  }

  // Clamps this encoding's max bitrate constraint with the remote b=TIAS
  // bandwidth value (0 means "unset" on both sides).
  void UpdateMaxBitrate(const SdpMediaSection& remote)
  {
    uint32_t tias = remote.GetBandwidth("TIAS");
    // select minimum of the two which is not zero
    mConstraints.maxBr = std::min(tias ? tias : mConstraints.maxBr,
                                  mConstraints.maxBr ? mConstraints.maxBr :
                                  tias);
    // TODO add support for b=AS if TIAS is not set (bug 976521)
  }

  EncodingConstraints mConstraints; // JS-imposed constraints for this encoding
  std::string mRid;                 // rid tag; empty when not using rid

private:
  PtrVector<JsepCodecDescription> mCodecs; // owned codec clones
};
+}
+
+#endif // _JESPTRACKENCODING_H_
diff --git a/media/webrtc/signaling/src/jsep/JsepTransport.h b/media/webrtc/signaling/src/jsep/JsepTransport.h
new file mode 100644
index 000000000..3b0d38ad6
--- /dev/null
+++ b/media/webrtc/signaling/src/jsep/JsepTransport.h
@@ -0,0 +1,116 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _JSEPTRANSPORT_H_
+#define _JSEPTRANSPORT_H_
+
+#include <string>
+#include <vector>
+
+#include <mozilla/RefPtr.h>
+#include <mozilla/UniquePtr.h>
+#include "nsISupportsImpl.h"
+
+#include "signaling/src/sdp/SdpAttribute.h"
+
+namespace mozilla {
+
// DTLS half of a JsepTransport: the negotiated fingerprints and our DTLS
// role. JsepSessionImpl (a friend) fills the members in during negotiation.
class JsepDtlsTransport
{
public:
  JsepDtlsTransport() : mRole(kJsepDtlsInvalidRole) {}

  virtual ~JsepDtlsTransport() {}

  enum Role {
    kJsepDtlsClient,
    kJsepDtlsServer,
    kJsepDtlsInvalidRole // default until negotiation decides
  };

  virtual const SdpFingerprintAttributeList&
  GetFingerprints() const
  {
    return mFingerprints;
  }

  virtual Role
  GetRole() const
  {
    return mRole;
  }

private:
  friend class JsepSessionImpl;

  SdpFingerprintAttributeList mFingerprints;
  Role mRole;
};
+
// ICE half of a JsepTransport: ufrag, password, and candidate strings.
// JsepSessionImpl (a friend) fills the members in during negotiation.
class JsepIceTransport
{
public:
  JsepIceTransport() {}

  virtual ~JsepIceTransport() {}

  const std::string&
  GetUfrag() const
  {
    return mUfrag;
  }
  const std::string&
  GetPassword() const
  {
    return mPwd;
  }
  const std::vector<std::string>&
  GetCandidates() const
  {
    return mCandidates;
  }

private:
  friend class JsepSessionImpl;

  std::string mUfrag;
  std::string mPwd;
  std::vector<std::string> mCandidates;
};
+
// Aggregates the ICE and DTLS state for one transport flow. Refcounted;
// Close() drops the sub-objects and resets the component count.
class JsepTransport
{
public:
  JsepTransport()
    : mComponents(0)
  {
  }

  // Releases ICE/DTLS state and marks the transport unused (0 components).
  void Close()
  {
    mComponents = 0;
    mTransportId.clear();
    mIce.reset();
    mDtls.reset();
  }

  // Unique identifier for this transport within this call. Group?
  std::string mTransportId;

  // ICE stuff.
  UniquePtr<JsepIceTransport> mIce;
  UniquePtr<JsepDtlsTransport> mDtls;

  // Number of required components.
  size_t mComponents;

  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(JsepTransport);

protected:
  // Destroyed via Release(), hence protected.
  ~JsepTransport() {}
};
+
+} // namespace mozilla
+
+#endif
diff --git a/media/webrtc/signaling/src/media-conduit/AudioConduit.cpp b/media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
new file mode 100755
index 000000000..2c57431e7
--- /dev/null
+++ b/media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
@@ -0,0 +1,1134 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "CSFLog.h"
+#include "nspr.h"
+
+#ifdef HAVE_NETINET_IN_H
+#include <netinet/in.h>
+#elif defined XP_WIN
+#include <winsock2.h>
+#endif
+
+#include "AudioConduit.h"
+#include "nsCOMPtr.h"
+#include "mozilla/Services.h"
+#include "nsServiceManagerUtils.h"
+#include "nsIPrefService.h"
+#include "nsIPrefBranch.h"
+#include "nsThreadUtils.h"
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+#include "Latency.h"
+#include "mozilla/Telemetry.h"
+#endif
+
+#include "webrtc/common.h"
+#include "webrtc/modules/audio_processing/include/audio_processing.h"
+#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h"
+#include "webrtc/voice_engine/include/voe_dtmf.h"
+#include "webrtc/voice_engine/include/voe_errors.h"
+#include "webrtc/voice_engine/voice_engine_impl.h"
+#include "webrtc/system_wrappers/interface/clock.h"
+
+#ifdef MOZ_WIDGET_ANDROID
+#include "AndroidJNIWrapper.h"
+#endif
+
+namespace mozilla {
+
// Log-module tag used by the CSFLog calls throughout this file.
static const char* logTag ="WebrtcAudioSessionConduit";

// 32 bytes is what WebRTC CodecInst expects
const unsigned int WebrtcAudioConduit::CODEC_PLNAME_SIZE = 32;
+
+/**
+ * Factory Method for AudioConduit
+ */
+RefPtr<AudioSessionConduit> AudioSessionConduit::Create()
+{
+ CSFLogDebug(logTag, "%s ", __FUNCTION__);
+ NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
+
+ WebrtcAudioConduit* obj = new WebrtcAudioConduit();
+ if(obj->Init() != kMediaConduitNoError)
+ {
+ CSFLogError(logTag, "%s AudioConduit Init Failed ", __FUNCTION__);
+ delete obj;
+ return nullptr;
+ }
+ CSFLogDebug(logTag, "%s Successfully created AudioConduit ", __FUNCTION__);
+ return obj;
+}
+
/**
 * Destruction defines for our super-classes
 *
 * Tears down in order: codec database, external media mode, transport
 * registration, channel/engine, interface pointers, and finally the
 * VoiceEngine itself. Main thread only.
 */
WebrtcAudioConduit::~WebrtcAudioConduit()
{
  NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");

  CSFLogDebug(logTag, "%s ", __FUNCTION__);
  // Free the receive-codec configs stored in mRecvCodecList (see
  // CopyCodecToDB usage in ConfigureRecvMediaCodecs).
  for(std::vector<AudioCodecConfig*>::size_type i=0;i < mRecvCodecList.size();i++)
  {
    delete mRecvCodecList[i];
  }

  // The first one of a pair to be deleted shuts down media for both
  if(mPtrVoEXmedia)
  {
    mPtrVoEXmedia->SetExternalRecordingStatus(false);
    mPtrVoEXmedia->SetExternalPlayoutStatus(false);
  }

  //Deal with the transport
  if(mPtrVoENetwork)
  {
    mPtrVoENetwork->DeRegisterExternalTransport(mChannel);
  }

  // Stop all media flow and tear down the channel before terminating VoE.
  if(mPtrVoEBase)
  {
    mPtrVoEBase->StopPlayout(mChannel);
    mPtrVoEBase->StopSend(mChannel);
    mPtrVoEBase->StopReceive(mChannel);
    mPtrVoEBase->DeleteChannel(mChannel);
    mPtrVoEBase->Terminate();
  }

  // We shouldn't delete the VoiceEngine until all these are released!
  // And we can't use a Scoped ptr, since the order is arbitrary
  mPtrVoENetwork = nullptr;
  mPtrVoEBase = nullptr;
  mPtrVoECodec = nullptr;
  mPtrVoEXmedia = nullptr;
  mPtrVoEProcessing = nullptr;
  mPtrVoEVideoSync = nullptr;
  mPtrVoERTP_RTCP = nullptr;
  mPtrRTP = nullptr;

  if(mVoiceEngine)
  {
    webrtc::VoiceEngine::Delete(mVoiceEngine);
  }
}
+
+bool WebrtcAudioConduit::SetLocalSSRC(unsigned int ssrc)
+{
+ unsigned int oldSsrc;
+ if (!GetLocalSSRC(&oldSsrc)) {
+ MOZ_ASSERT(false, "GetLocalSSRC failed");
+ return false;
+ }
+
+ if (oldSsrc == ssrc) {
+ return true;
+ }
+
+ bool wasTransmitting = mEngineTransmitting;
+ if (StopTransmitting() != kMediaConduitNoError) {
+ return false;
+ }
+
+ if (mPtrRTP->SetLocalSSRC(mChannel, ssrc)) {
+ return false;
+ }
+
+ if (wasTransmitting) {
+ if (StartTransmitting() != kMediaConduitNoError) {
+ return false;
+ }
+ }
+ return true;
+}
+
// Reads the channel's local SSRC into |*ssrc|. VoE returns 0 on success,
// hence the negation; true means success.
bool WebrtcAudioConduit::GetLocalSSRC(unsigned int* ssrc) {
  return !mPtrRTP->GetLocalSSRC(mChannel, *ssrc);
}
+
// Reads the remote peer's SSRC into |*ssrc|; true means success.
bool WebrtcAudioConduit::GetRemoteSSRC(unsigned int* ssrc) {
  return !mPtrRTP->GetRemoteSSRC(mChannel, *ssrc);
}
+
+bool WebrtcAudioConduit::SetLocalCNAME(const char* cname)
+{
+ char temp[256];
+ strncpy(temp, cname, sizeof(temp) - 1);
+ temp[sizeof(temp) - 1] = 0;
+ return !mPtrRTP->SetRTCP_CNAME(mChannel, temp);
+}
+
// Fetches jitter-buffer delay, playout-buffer delay, and the A/V sync offset
// (all in ms) from VoE. Returns true on success.
bool WebrtcAudioConduit::GetAVStats(int32_t* jitterBufferDelayMs,
                                    int32_t* playoutBufferDelayMs,
                                    int32_t* avSyncOffsetMs) {
  return !mPtrVoEVideoSync->GetDelayEstimate(mChannel,
                                             jitterBufferDelayMs,
                                             playoutBufferDelayMs,
                                             avSyncOffsetMs);
}
+
// Fetches jitter (ms) and cumulative packet loss for the channel. Max jitter
// and the discarded-packet count are required by the VoE call signature but
// are not reported to the caller.
bool WebrtcAudioConduit::GetRTPStats(unsigned int* jitterMs,
                                     unsigned int* cumulativeLost) {
  unsigned int maxJitterMs = 0;
  unsigned int discardedPackets; // out-param for VoE; value unused here
  *jitterMs = 0;
  *cumulativeLost = 0;
  return !mPtrRTP->GetRTPStatistics(mChannel, *jitterMs, maxJitterMs,
                                    discardedPackets, *cumulativeLost);
}
+
// Converts an NTP timestamp (whole seconds plus a 32-bit fractional part)
// into a DOMHighResTimeStamp in milliseconds, rebasing the epoch to
// Jan 1 1970 via webrtc::kNtpJan1970.
DOMHighResTimeStamp
NTPtoDOMHighResTimeStamp(uint32_t ntpHigh, uint32_t ntpLow) {
  return (uint32_t(ntpHigh - webrtc::kNtpJan1970) +
          double(ntpLow) / webrtc::kMagicNtpFractionalUnit) * 1000;
}
+
// Fetches the remote RTCP receiver-report info for this channel. On success
// (true), all out-params are filled and |*timestamp| is the report's NTP time
// converted to a DOMHighResTimeStamp; out-params are untouched on failure.
bool WebrtcAudioConduit::GetRTCPReceiverReport(DOMHighResTimeStamp* timestamp,
                                               uint32_t* jitterMs,
                                               uint32_t* packetsReceived,
                                               uint64_t* bytesReceived,
                                               uint32_t* cumulativeLost,
                                               int32_t* rttMs) {
  uint32_t ntpHigh, ntpLow;
  uint16_t fractionLost; // fetched but not reported to the caller
  bool result = !mPtrRTP->GetRemoteRTCPReceiverInfo(mChannel, ntpHigh, ntpLow,
                                                    *packetsReceived,
                                                    *bytesReceived,
                                                    *jitterMs,
                                                    fractionLost,
                                                    *cumulativeLost,
                                                    *rttMs);
  if (result) {
    *timestamp = NTPtoDOMHighResTimeStamp(ntpHigh, ntpLow);
  }
  return result;
}
+
// Fetches the most recent remote RTCP sender report via the channel's
// RtpRtcp module. On success (true) fills the converted NTP timestamp plus
// packet/octet send counts.
bool WebrtcAudioConduit::GetRTCPSenderReport(DOMHighResTimeStamp* timestamp,
                                             unsigned int* packetsSent,
                                             uint64_t* bytesSent) {
  webrtc::RTCPSenderInfo senderInfo;
  webrtc::RtpRtcp * rtpRtcpModule;
  webrtc::RtpReceiver * rtp_receiver;
  // Both VoE calls return 0 on success; short-circuit on the first failure.
  bool result =
    !mPtrVoEVideoSync->GetRtpRtcp(mChannel,&rtpRtcpModule,&rtp_receiver) &&
    !rtpRtcpModule->RemoteRTCPStat(&senderInfo);
  if (result){
    *timestamp = NTPtoDOMHighResTimeStamp(senderInfo.NTPseconds,
                                          senderInfo.NTPfraction);
    *packetsSent = senderInfo.sendPacketCount;
    *bytesSent = senderInfo.sendOctetCount;
  }
  return result;
 }
+
+bool WebrtcAudioConduit::SetDtmfPayloadType(unsigned char type) {
+ CSFLogInfo(logTag, "%s : setting dtmf payload %d", __FUNCTION__, (int)type);
+
+ ScopedCustomReleasePtr<webrtc::VoEDtmf> mPtrVoEDtmf;
+ mPtrVoEDtmf = webrtc::VoEDtmf::GetInterface(mVoiceEngine);
+ if (!mPtrVoEDtmf) {
+ CSFLogError(logTag, "%s Unable to initialize VoEDtmf", __FUNCTION__);
+ return false;
+ }
+
+ int result = mPtrVoEDtmf->SetSendTelephoneEventPayloadType(mChannel, type);
+ if (result == -1) {
+ CSFLogError(logTag, "%s Failed call to SetSendTelephoneEventPayloadType",
+ __FUNCTION__);
+ }
+ return result != -1;
+}
+
// Sends a DTMF tone on |channel|. Must be called off the main thread.
// Returns false if the engine is gone, DTMF was not enabled during
// ConfigureSendMediaCodec, or the underlying call fails.
bool WebrtcAudioConduit::InsertDTMFTone(int channel, int eventCode,
                                        bool outOfBand, int lengthMs,
                                        int attenuationDb) {
  NS_ASSERTION(!NS_IsMainThread(), "Do not call on main thread");

  if (!mVoiceEngine || !mDtmfEnabled) {
    return false;
  }

  // Cast down to the engine impl to call SendTelephoneEvent directly.
  webrtc::VoiceEngineImpl* s = static_cast<webrtc::VoiceEngineImpl*>(mVoiceEngine);
  int result = s->SendTelephoneEvent(channel, eventCode, outOfBand, lengthMs, attenuationDb);
  return result != -1;
}
+
+/*
+ * WebRTCAudioConduit Implementation
+ */
+MediaConduitErrorCode WebrtcAudioConduit::Init()
+{
+ CSFLogDebug(logTag, "%s this=%p", __FUNCTION__, this);
+
+#ifdef MOZ_WIDGET_ANDROID
+ jobject context = jsjni_GetGlobalContextRef();
+ // get the JVM
+ JavaVM *jvm = jsjni_GetVM();
+
+ if (webrtc::VoiceEngine::SetAndroidObjects(jvm, (void*)context) != 0) {
+ CSFLogError(logTag, "%s Unable to set Android objects", __FUNCTION__);
+ return kMediaConduitSessionNotInited;
+ }
+#endif
+
+ // Per WebRTC APIs below function calls return nullptr on failure
+ if(!(mVoiceEngine = webrtc::VoiceEngine::Create()))
+ {
+ CSFLogError(logTag, "%s Unable to create voice engine", __FUNCTION__);
+ return kMediaConduitSessionNotInited;
+ }
+
+ if(!(mPtrVoEBase = VoEBase::GetInterface(mVoiceEngine)))
+ {
+ CSFLogError(logTag, "%s Unable to initialize VoEBase", __FUNCTION__);
+ return kMediaConduitSessionNotInited;
+ }
+
+ if(!(mPtrVoENetwork = VoENetwork::GetInterface(mVoiceEngine)))
+ {
+ CSFLogError(logTag, "%s Unable to initialize VoENetwork", __FUNCTION__);
+ return kMediaConduitSessionNotInited;
+ }
+
+ if(!(mPtrVoECodec = VoECodec::GetInterface(mVoiceEngine)))
+ {
+ CSFLogError(logTag, "%s Unable to initialize VoEBCodec", __FUNCTION__);
+ return kMediaConduitSessionNotInited;
+ }
+
+ if(!(mPtrVoEProcessing = VoEAudioProcessing::GetInterface(mVoiceEngine)))
+ {
+ CSFLogError(logTag, "%s Unable to initialize VoEProcessing", __FUNCTION__);
+ return kMediaConduitSessionNotInited;
+ }
+ if(!(mPtrVoEXmedia = VoEExternalMedia::GetInterface(mVoiceEngine)))
+ {
+ CSFLogError(logTag, "%s Unable to initialize VoEExternalMedia", __FUNCTION__);
+ return kMediaConduitSessionNotInited;
+ }
+ if(!(mPtrVoERTP_RTCP = VoERTP_RTCP::GetInterface(mVoiceEngine)))
+ {
+ CSFLogError(logTag, "%s Unable to initialize VoERTP_RTCP", __FUNCTION__);
+ return kMediaConduitSessionNotInited;
+ }
+
+ if(!(mPtrVoEVideoSync = VoEVideoSync::GetInterface(mVoiceEngine)))
+ {
+ CSFLogError(logTag, "%s Unable to initialize VoEVideoSync", __FUNCTION__);
+ return kMediaConduitSessionNotInited;
+ }
+ if (!(mPtrRTP = webrtc::VoERTP_RTCP::GetInterface(mVoiceEngine)))
+ {
+ CSFLogError(logTag, "%s Unable to get audio RTP/RTCP interface ",
+ __FUNCTION__);
+ return kMediaConduitSessionNotInited;
+ }
+
+ // init the engine with our audio device layer
+ if(mPtrVoEBase->Init() == -1)
+ {
+ CSFLogError(logTag, "%s VoiceEngine Base Not Initialized", __FUNCTION__);
+ return kMediaConduitSessionNotInited;
+ }
+
+ if( (mChannel = mPtrVoEBase->CreateChannel()) == -1)
+ {
+ CSFLogError(logTag, "%s VoiceEngine Channel creation failed",__FUNCTION__);
+ return kMediaConduitChannelError;
+ }
+
+ CSFLogDebug(logTag, "%s Channel Created %d ",__FUNCTION__, mChannel);
+
+ if(mPtrVoENetwork->RegisterExternalTransport(mChannel, *this) == -1)
+ {
+ CSFLogError(logTag, "%s VoiceEngine, External Transport Failed",__FUNCTION__);
+ return kMediaConduitTransportRegistrationFail;
+ }
+
+ if(mPtrVoEXmedia->SetExternalRecordingStatus(true) == -1)
+ {
+ CSFLogError(logTag, "%s SetExternalRecordingStatus Failed %d",__FUNCTION__,
+ mPtrVoEBase->LastError());
+ return kMediaConduitExternalPlayoutError;
+ }
+
+ if(mPtrVoEXmedia->SetExternalPlayoutStatus(true) == -1)
+ {
+ CSFLogError(logTag, "%s SetExternalPlayoutStatus Failed %d ",__FUNCTION__,
+ mPtrVoEBase->LastError());
+ return kMediaConduitExternalRecordingError;
+ }
+
+ CSFLogDebug(logTag , "%s AudioSessionConduit Initialization Done (%p)",__FUNCTION__, this);
+ return kMediaConduitNoError;
+}
+
// AudioSessionConduit Implementation
// Stores the transport used for outgoing RTP/RTCP; guarded by
// mTransportMonitor.
MediaConduitErrorCode
WebrtcAudioConduit::SetTransmitterTransport(RefPtr<TransportInterface> aTransport)
{
  CSFLogDebug(logTag, "%s ", __FUNCTION__);

  ReentrantMonitorAutoEnter enter(mTransportMonitor);
  // set the transport
  mTransmitterTransport = aTransport;
  return kMediaConduitNoError;
}
+
// Stores the transport used for incoming RTP/RTCP; guarded by
// mTransportMonitor.
MediaConduitErrorCode
WebrtcAudioConduit::SetReceiverTransport(RefPtr<TransportInterface> aTransport)
{
  CSFLogDebug(logTag, "%s ", __FUNCTION__);

  ReentrantMonitorAutoEnter enter(mTransportMonitor);
  // set the transport
  mReceiverTransport = aTransport;
  return kMediaConduitNoError;
}
+
/**
 * Applies |codecConfig| as the send codec: validates, stops transmission,
 * sets the codec and its FEC/DTMF/Opus options, re-reads the capture-delay
 * pref, restarts transmission, and records a copy of the applied config.
 */
MediaConduitErrorCode
WebrtcAudioConduit::ConfigureSendMediaCodec(const AudioCodecConfig* codecConfig)
{
  CSFLogDebug(logTag, "%s ", __FUNCTION__);
  MediaConduitErrorCode condError = kMediaConduitNoError;
  int error = 0;//webrtc engine errors
  webrtc::CodecInst cinst;

  {
    //validate codec param
    if((condError = ValidateCodecConfig(codecConfig, true)) != kMediaConduitNoError)
    {
      return condError;
    }
  }

  // The send codec can't be changed while transmitting.
  condError = StopTransmitting();
  if (condError != kMediaConduitNoError) {
    return condError;
  }

  if(!CodecConfigToWebRTCCodec(codecConfig,cinst))
  {
    CSFLogError(logTag,"%s CodecConfig to WebRTC Codec Failed ",__FUNCTION__);
    return kMediaConduitMalformedArgument;
  }

  if(mPtrVoECodec->SetSendCodec(mChannel, cinst) == -1)
  {
    error = mPtrVoEBase->LastError();
    CSFLogError(logTag, "%s SetSendCodec - Invalid Codec %d ",__FUNCTION__,
                error);

    // Distinguish "codec rejected" from other engine failures.
    if(error == VE_CANNOT_SET_SEND_CODEC || error == VE_CODEC_ERROR)
    {
      CSFLogError(logTag, "%s Invalid Send Codec", __FUNCTION__);
      return kMediaConduitInvalidSendCodec;
    }
    CSFLogError(logTag, "%s SetSendCodec Failed %d ", __FUNCTION__,
                mPtrVoEBase->LastError());
    return kMediaConduitUnknownError;
  }

  // This must be called after SetSendCodec
  if (mPtrVoECodec->SetFECStatus(mChannel, codecConfig->mFECEnabled) == -1) {
    CSFLogError(logTag, "%s SetFECStatus Failed %d ", __FUNCTION__,
                mPtrVoEBase->LastError());
    return kMediaConduitFECStatusError;
  }

  mDtmfEnabled = codecConfig->mDtmfEnabled;

  // Opus-only knob: cap the far end's assumed playback rate.
  if (codecConfig->mName == "opus" && codecConfig->mMaxPlaybackRate) {
    if (mPtrVoECodec->SetOpusMaxPlaybackRate(
          mChannel,
          codecConfig->mMaxPlaybackRate) == -1) {
      CSFLogError(logTag, "%s SetOpusMaxPlaybackRate Failed %d ", __FUNCTION__,
                  mPtrVoEBase->LastError());
      return kMediaConduitUnknownError;
    }
  }

#if !defined(MOZILLA_EXTERNAL_LINKAGE)
  // TEMPORARY - see bug 694814 comment 2
  nsresult rv;
  nsCOMPtr<nsIPrefService> prefs = do_GetService("@mozilla.org/preferences-service;1", &rv);
  if (NS_SUCCEEDED(rv)) {
    nsCOMPtr<nsIPrefBranch> branch = do_QueryInterface(prefs);

    if (branch) {
      branch->GetIntPref("media.peerconnection.capture_delay", &mCaptureDelay);
    }
  }
#endif

  condError = StartTransmitting();
  if (condError != kMediaConduitNoError) {
    return condError;
  }

  {
    MutexAutoLock lock(mCodecMutex);

    //Copy the applied config for future reference.
    // NOTE(review): this copy does not carry over mDtmfEnabled or
    // mMaxPlaybackRate from |codecConfig| - confirm the AudioCodecConfig
    // constructor's defaults make that acceptable.
    mCurSendCodecConfig = new AudioCodecConfig(codecConfig->mType,
                                               codecConfig->mName,
                                               codecConfig->mFreq,
                                               codecConfig->mPacSize,
                                               codecConfig->mChannels,
                                               codecConfig->mRate,
                                               codecConfig->mFECEnabled);
  }
  return kMediaConduitNoError;
}
+
/**
 * Configures the set of receive codecs. Reception is stopped first (a recv
 * codec can't be applied while the engine is playing); each valid config is
 * registered with VoE and copied into the local codec database; reception is
 * restarted if at least one codec was applied successfully.
 */
MediaConduitErrorCode
WebrtcAudioConduit::ConfigureRecvMediaCodecs(
    const std::vector<AudioCodecConfig*>& codecConfigList)
{
  CSFLogDebug(logTag, "%s ", __FUNCTION__);
  MediaConduitErrorCode condError = kMediaConduitNoError;
  int error = 0; //webrtc engine errors
  bool success = false;

  // Are we receiving already? If so, stop receiving and playout
  // since we can't apply new recv codec when the engine is playing.
  condError = StopReceiving();
  if (condError != kMediaConduitNoError) {
    return condError;
  }

  if(codecConfigList.empty())
  {
    CSFLogError(logTag, "%s Zero number of codecs to configure", __FUNCTION__);
    return kMediaConduitMalformedArgument;
  }

  // Try Applying the codecs in the list.
  // We succeed if at least one codec was applied and reception was
  // started successfully.
  for(std::vector<AudioCodecConfig*>::size_type i=0 ;i<codecConfigList.size();i++)
  {
    //if the codec param is invalid or duplicate, return error
    if((condError = ValidateCodecConfig(codecConfigList[i],false)) != kMediaConduitNoError)
    {
      return condError;
    }

    webrtc::CodecInst cinst;
    if(!CodecConfigToWebRTCCodec(codecConfigList[i],cinst))
    {
      // Conversion failure is not fatal; try the next codec.
      CSFLogError(logTag,"%s CodecConfig to WebRTC Codec Failed ",__FUNCTION__);
      continue;
    }

    if(mPtrVoECodec->SetRecPayloadType(mChannel,cinst) == -1)
    {
      error = mPtrVoEBase->LastError();
      CSFLogError(logTag, "%s SetRecvCodec Failed %d ",__FUNCTION__, error);
      continue;
    } else {
      CSFLogDebug(logTag, "%s Successfully Set RecvCodec %s", __FUNCTION__,
                  codecConfigList[i]->mName.c_str());
      //copy this to local database
      if(CopyCodecToDB(codecConfigList[i]))
      {
        success = true;
      } else {
        // Unlike a single codec failing above, a database failure is fatal.
        CSFLogError(logTag,"%s Unable to updated Codec Database", __FUNCTION__);
        return kMediaConduitUnknownError;
      }

    }

  } //end for

  if(!success)
  {
    CSFLogError(logTag, "%s Setting Receive Codec Failed ", __FUNCTION__);
    return kMediaConduitInvalidReceiveCodec;
  }

  //If we are here, at least one codec should have been set
  condError = StartReceiving();
  if (condError != kMediaConduitNoError) {
    return condError;
  }

  DumpCodecDB();
  return kMediaConduitNoError;
}
// Enables or disables sending the audio-level RTP header extension with
// extension id |id| on this channel.
MediaConduitErrorCode
WebrtcAudioConduit::EnableAudioLevelExtension(bool enabled, uint8_t id)
{
  CSFLogDebug(logTag, "%s %d %d ", __FUNCTION__, enabled, id);

  if (mPtrVoERTP_RTCP->SetSendAudioLevelIndicationStatus(mChannel, enabled, id) == -1)
  {
    CSFLogError(logTag, "%s SetSendAudioLevelIndicationStatus Failed", __FUNCTION__);
    return kMediaConduitUnknownError;
  }

  return kMediaConduitNoError;
}
+
/**
 * Feeds a 10ms block of captured audio into the VoiceEngine for encoding
 * and transmission. Fails if the arguments are malformed or if the conduit
 * is not currently transmitting.
 */
MediaConduitErrorCode
WebrtcAudioConduit::SendAudioFrame(const int16_t audio_data[],
                                   int32_t lengthSamples,
                                   int32_t samplingFreqHz,
                                   int32_t capture_delay)
{
  CSFLogDebug(logTag, "%s ", __FUNCTION__);
  // Following checks need to be performed
  // 1. Non null audio buffer pointer,
  // 2. invalid sampling frequency - less than 0 or unsupported ones
  // 3. Appropriate Sample Length for 10 ms audio-frame. This represents
  // block size the VoiceEngine feeds into encoder for passed in audio-frame
  // Ex: for 16000 sampling rate , valid block-length is 160
  // Similarly for 32000 sampling rate, valid block length is 320
  // We do the check by the verify modular operator below to be zero

  if(!audio_data || (lengthSamples <= 0) ||
     (IsSamplingFreqSupported(samplingFreqHz) == false) ||
     ((lengthSamples % (samplingFreqHz / 100) != 0)) )
  {
    CSFLogError(logTag, "%s Invalid Parameters ",__FUNCTION__);
    MOZ_ASSERT(PR_FALSE);
    return kMediaConduitMalformedArgument;
  }

  //validate capture time
  if(capture_delay < 0 )
  {
    CSFLogError(logTag,"%s Invalid Capture Delay ", __FUNCTION__);
    MOZ_ASSERT(PR_FALSE);
    return kMediaConduitMalformedArgument;
  }

  // if transmission is not started .. conduit cannot insert frames
  if(!mEngineTransmitting)
  {
    CSFLogError(logTag, "%s Engine not transmitting ", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

#if !defined(MOZILLA_EXTERNAL_LINKAGE)
  // Record arrival time for latency logging (matched up in GetAudioFrame).
  if (MOZ_LOG_TEST(GetLatencyLog(), LogLevel::Debug)) {
    struct Processing insert = { TimeStamp::Now(), 0 };
    mProcessing.AppendElement(insert);
  }
#endif

  // NOTE(review): the validated caller-supplied capture_delay is discarded
  // here in favor of mCaptureDelay (read from the
  // "media.peerconnection.capture_delay" pref in ConfigureSendMediaCodec) -
  // confirm this unconditional override is intentional.
  capture_delay = mCaptureDelay;
  //Insert the samples
  if(mPtrVoEXmedia->ExternalRecordingInsertData(audio_data,
                                                lengthSamples,
                                                samplingFreqHz,
                                                capture_delay) == -1)
  {
    int error = mPtrVoEBase->LastError();
    CSFLogError(logTag, "%s Inserting audio data Failed %d", __FUNCTION__, error);
    if(error == VE_RUNTIME_REC_ERROR)
    {
      return kMediaConduitRecordingError;
    }
    return kMediaConduitUnknownError;
  }
  // we should be good here
  return kMediaConduitNoError;
}
+
/**
 * Pulls one 10ms block of decoded playout audio from the VoiceEngine into
 * |speechData|, setting |lengthSamples| to the number of samples written.
 * Also periodically logs A/V sync stats (once per second of samples) and
 * feeds the latency logger when enabled.
 */
MediaConduitErrorCode
WebrtcAudioConduit::GetAudioFrame(int16_t speechData[],
                                  int32_t samplingFreqHz,
                                  int32_t capture_delay,
                                  int& lengthSamples)
{

  CSFLogDebug(logTag, "%s ", __FUNCTION__);
  unsigned int numSamples = 0;

  //validate params
  if(!speechData )
  {
    CSFLogError(logTag,"%s Null Audio Buffer Pointer", __FUNCTION__);
    MOZ_ASSERT(PR_FALSE);
    return kMediaConduitMalformedArgument;
  }

  // Validate sample length
  if((numSamples = GetNum10msSamplesForFrequency(samplingFreqHz)) == 0 )
  {
    CSFLogError(logTag,"%s Invalid Sampling Frequency ", __FUNCTION__);
    MOZ_ASSERT(PR_FALSE);
    return kMediaConduitMalformedArgument;
  }

  //validate capture time
  if(capture_delay < 0 )
  {
    CSFLogError(logTag,"%s Invalid Capture Delay ", __FUNCTION__);
    MOZ_ASSERT(PR_FALSE);
    return kMediaConduitMalformedArgument;
  }

  //Conduit should have reception enabled before we ask for decoded
  // samples
  if(!mEngineReceiving)
  {
    CSFLogError(logTag, "%s Engine not Receiving ", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }


  lengthSamples = 0; //output parameter

  if(mPtrVoEXmedia->ExternalPlayoutGetData( speechData,
                                            samplingFreqHz,
                                            capture_delay,
                                            lengthSamples) == -1)
  {
    int error = mPtrVoEBase->LastError();
    CSFLogError(logTag, "%s Getting audio data Failed %d", __FUNCTION__, error);
    if(error == VE_RUNTIME_PLAY_ERROR)
    {
      return kMediaConduitPlayoutError;
    }
    return kMediaConduitUnknownError;
  }

  // Not #ifdef DEBUG or on a log module so we can use it for about:webrtc/etc
  // Log A/V sync stats roughly once per second of delivered samples.
  mSamples += lengthSamples;
  if (mSamples >= mLastSyncLog + samplingFreqHz) {
    int jitter_buffer_delay_ms;
    int playout_buffer_delay_ms;
    int avsync_offset_ms;
    if (GetAVStats(&jitter_buffer_delay_ms,
                   &playout_buffer_delay_ms,
                   &avsync_offset_ms)) {
#if !defined(MOZILLA_EXTERNAL_LINKAGE)
      // Record which stream lags in separate telemetry buckets.
      if (avsync_offset_ms < 0) {
        Telemetry::Accumulate(Telemetry::WEBRTC_AVSYNC_WHEN_VIDEO_LAGS_AUDIO_MS,
                              -avsync_offset_ms);
      } else {
        Telemetry::Accumulate(Telemetry::WEBRTC_AVSYNC_WHEN_AUDIO_LAGS_VIDEO_MS,
                              avsync_offset_ms);
      }
#endif
      CSFLogError(logTag,
                  "A/V sync: sync delta: %dms, audio jitter delay %dms, playout delay %dms",
                  avsync_offset_ms, jitter_buffer_delay_ms, playout_buffer_delay_ms);
    } else {
      CSFLogError(logTag, "A/V sync: GetAVStats failed");
    }
    mLastSyncLog = mSamples;
  }

#if !defined(MOZILLA_EXTERNAL_LINKAGE)
  if (MOZ_LOG_TEST(GetLatencyLog(), LogLevel::Debug)) {
    if (mProcessing.Length() > 0) {
      unsigned int now;
      mPtrVoEVideoSync->GetPlayoutTimestamp(mChannel, now);
      if (static_cast<uint32_t>(now) != mLastTimestamp) {
        mLastTimestamp = static_cast<uint32_t>(now);
        // Find the block that includes this timestamp in the network input
        while (mProcessing.Length() > 0) {
          // FIX! assumes 20ms @ 48000Hz
          // FIX handle wrap-around
          if (mProcessing[0].mRTPTimeStamp + 20*(48000/1000) >= now) {
            TimeDuration t = TimeStamp::Now() - mProcessing[0].mTimeStamp;
            // Wrap-around?
            int64_t delta = t.ToMilliseconds() + (now - mProcessing[0].mRTPTimeStamp)/(48000/1000);
            LogTime(AsyncLatencyLogger::AudioRecvRTP, ((uint64_t) this), delta);
            break;
          }
          mProcessing.RemoveElementAt(0);
        }
      }
    }
  }
#endif
  CSFLogDebug(logTag,"%s GetAudioFrame:Got samples: length %d ",__FUNCTION__,
              lengthSamples);
  return kMediaConduitNoError;
}
+
+// Transport Layer Callbacks
+//
+// Feeds an RTP packet received from the external transport into the
+// VoiceEngine for decoding.  Fails with kMediaConduitSessionNotInited
+// if the conduit is not currently receiving.
+MediaConduitErrorCode
+WebrtcAudioConduit::ReceivedRTPPacket(const void *data, int len)
+{
+  CSFLogDebug(logTag, "%s : channel %d", __FUNCTION__, mChannel);
+
+  if(mEngineReceiving)
+  {
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+    if (MOZ_LOG_TEST(GetLatencyLog(), LogLevel::Debug)) {
+      // timestamp is at 32 bits in ([1])
+      // Record arrival time plus the RTP timestamp (second 32-bit word of
+      // the RTP header, converted from network byte order) for the
+      // receive-latency tracking done in GetAudioFrame().
+      struct Processing insert = { TimeStamp::Now(),
+                                   ntohl(static_cast<const uint32_t *>(data)[1]) };
+      mProcessing.AppendElement(insert);
+    }
+#endif
+
+    // XXX we need to get passed the time the packet was received
+    if(mPtrVoENetwork->ReceivedRTPPacket(mChannel, data, len) == -1)
+    {
+      int error = mPtrVoEBase->LastError();
+      CSFLogError(logTag, "%s RTP Processing Error %d", __FUNCTION__, error);
+      if(error == VE_RTP_RTCP_MODULE_ERROR)
+      {
+        return kMediaConduitRTPRTCPModuleError;
+      }
+      return kMediaConduitUnknownError;
+    }
+  } else {
+    CSFLogError(logTag, "Error: %s when not receiving", __FUNCTION__);
+    return kMediaConduitSessionNotInited;
+  }
+
+  return kMediaConduitNoError;
+}
+
+// Hands a received RTCP packet to the VoiceEngine for this channel and
+// maps the engine's failure codes onto conduit error codes.
+MediaConduitErrorCode
+WebrtcAudioConduit::ReceivedRTCPPacket(const void *data, int len)
+{
+  CSFLogDebug(logTag, "%s : channel %d",__FUNCTION__, mChannel);
+
+  if (mPtrVoENetwork->ReceivedRTCPPacket(mChannel, data, len) != -1) {
+    return kMediaConduitNoError;
+  }
+
+  const int error = mPtrVoEBase->LastError();
+  CSFLogError(logTag, "%s RTCP Processing Error %d", __FUNCTION__, error);
+  return (error == VE_RTP_RTCP_MODULE_ERROR) ? kMediaConduitRTPRTCPModuleError
+                                             : kMediaConduitUnknownError;
+}
+
+// Tears down the VoiceEngine send side; no-op when not transmitting.
+MediaConduitErrorCode
+WebrtcAudioConduit::StopTransmitting()
+{
+  if (!mEngineTransmitting) {
+    return kMediaConduitNoError;
+  }
+
+  CSFLogDebug(logTag, "%s Engine Already Sending. Attemping to Stop ", __FUNCTION__);
+  if (mPtrVoEBase->StopSend(mChannel) == -1) {
+    CSFLogError(logTag, "%s StopSend() Failed %d ", __FUNCTION__,
+                mPtrVoEBase->LastError());
+    return kMediaConduitUnknownError;
+  }
+
+  mEngineTransmitting = false;
+  return kMediaConduitNoError;
+}
+
+// Brings up the VoiceEngine send side; no-op when already transmitting.
+MediaConduitErrorCode
+WebrtcAudioConduit::StartTransmitting()
+{
+  if (mEngineTransmitting) {
+    return kMediaConduitNoError;
+  }
+
+  //Let's Send Transport State-machine on the Engine
+  if (mPtrVoEBase->StartSend(mChannel) == -1) {
+    const int error = mPtrVoEBase->LastError();
+    CSFLogError(logTag, "%s StartSend failed %d", __FUNCTION__, error);
+    return kMediaConduitUnknownError;
+  }
+
+  mEngineTransmitting = true;
+  return kMediaConduitNoError;
+}
+
+// Stops reception and playout on the channel.  StopReceive() failures are
+// treated as non-fatal (per voe_errors.h); a StopPlayout() failure with
+// VE_CANNOT_STOP_PLAYOUT returns kMediaConduitPlayoutError without
+// clearing the receiving state.
+MediaConduitErrorCode
+WebrtcAudioConduit::StopReceiving()
+{
+  if(mEngineReceiving)
+  {
+    CSFLogDebug(logTag, "%s Engine Already Receiving. Attemping to Stop ", __FUNCTION__);
+    // AudioEngine doesn't fail fatally on stopping reception. Ref:voe_errors.h.
+    // hence we need not be strict in failing here on errors
+    mPtrVoEBase->StopReceive(mChannel);
+    CSFLogDebug(logTag, "%s Attemping to Stop playout ", __FUNCTION__);
+    if(mPtrVoEBase->StopPlayout(mChannel) == -1)
+    {
+      if( mPtrVoEBase->LastError() == VE_CANNOT_STOP_PLAYOUT)
+      {
+        CSFLogDebug(logTag, "%s Stop-Playout Failed %d", __FUNCTION__, mPtrVoEBase->LastError());
+        return kMediaConduitPlayoutError;
+      }
+    }
+    // NOTE(review): mEngineReceiving is cleared even when StopPlayout()
+    // failed with an error other than VE_CANNOT_STOP_PLAYOUT.
+    mEngineReceiving = false;
+  }
+
+  return kMediaConduitNoError;
+}
+
+// Starts reception and playout on the channel.  A StartReceive() socket
+// error maps to kMediaConduitSocketError; a StartPlayout() failure maps
+// to kMediaConduitPlayoutError.  No-op when already receiving.
+MediaConduitErrorCode
+WebrtcAudioConduit::StartReceiving()
+{
+  if (!mEngineReceiving) {
+    if(mPtrVoEBase->StartReceive(mChannel) == -1)
+    {
+      int error = mPtrVoEBase->LastError();
+      CSFLogError(logTag , "%s StartReceive Failed %d ",__FUNCTION__, error);
+      if(error == VE_RECV_SOCKET_ERROR)
+      {
+        return kMediaConduitSocketError;
+      }
+      return kMediaConduitUnknownError;
+    }
+
+
+    if(mPtrVoEBase->StartPlayout(mChannel) == -1)
+    {
+      CSFLogError(logTag, "%s Starting playout Failed", __FUNCTION__);
+      return kMediaConduitPlayoutError;
+    }
+    mEngineReceiving = true;
+  }
+
+  return kMediaConduitNoError;
+}
+
+//WebRTC::RTP Callback Implementation
+// Called on AudioGUM or MSG thread
+//
+// webrtc::Transport callback: hands an outgoing RTP packet produced by
+// the VoiceEngine to the registered transmitter transport.  Returns the
+// byte count on success, -1 on failure (per the Transport contract).
+int WebrtcAudioConduit::SendPacket(int channel, const void* data, size_t len)
+{
+  CSFLogDebug(logTag, "%s : channel %d", __FUNCTION__, channel);
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+  if (MOZ_LOG_TEST(GetLatencyLog(), LogLevel::Debug)) {
+    if (mProcessing.Length() > 0) {
+      TimeStamp started = mProcessing[0].mTimeStamp;
+      mProcessing.RemoveElementAt(0);
+      // 20ms packetization => one sent packet normally covers two inserted
+      // 10ms blocks.  Guard the second removal: the original code removed
+      // unconditionally, which indexes out of bounds when only one block
+      // had been queued.  Could automate this by watching sizes.
+      if (mProcessing.Length() > 0) {
+        mProcessing.RemoveElementAt(0);
+      }
+      TimeDuration t = TimeStamp::Now() - started;
+      int64_t delta = t.ToMilliseconds();
+      LogTime(AsyncLatencyLogger::AudioSendRTP, ((uint64_t) this), delta);
+    }
+  }
+#endif
+  ReentrantMonitorAutoEnter enter(mTransportMonitor);
+  if(mTransmitterTransport &&
+     (mTransmitterTransport->SendRtpPacket(data, len) == NS_OK))
+  {
+    CSFLogDebug(logTag, "%s Sent RTP Packet ", __FUNCTION__);
+    return len;
+  } else {
+    CSFLogError(logTag, "%s RTP Packet Send Failed ", __FUNCTION__);
+    return -1;
+  }
+}
+
+// Called on WebRTC Process thread and perhaps others
+//
+// webrtc::Transport callback: routes an outgoing RTCP packet.  Tries the
+// receiver transport first, then falls back to the transmitter transport
+// (sender reports).  Returns len on success, -1 on failure.
+int WebrtcAudioConduit::SendRTCPPacket(int channel, const void* data, size_t len)
+{
+  CSFLogDebug(logTag, "%s : channel %d , len %lu, first rtcp = %u ",
+              __FUNCTION__,
+              channel,
+              (unsigned long) len,
+              static_cast<unsigned>(((uint8_t *) data)[1]));
+
+  // We come here if we have only one pipeline/conduit setup,
+  // such as for unidirectional streams.
+  // We also end up here if we are receiving
+  ReentrantMonitorAutoEnter enter(mTransportMonitor);
+  if(mReceiverTransport &&
+     mReceiverTransport->SendRtcpPacket(data, len) == NS_OK)
+  {
+    // Might be a sender report, might be a receiver report, we don't know.
+    CSFLogDebug(logTag, "%s Sent RTCP Packet ", __FUNCTION__);
+    return len;
+  } else if(mTransmitterTransport &&
+            (mTransmitterTransport->SendRtcpPacket(data, len) == NS_OK)) {
+    CSFLogDebug(logTag, "%s Sent RTCP Packet (sender report) ", __FUNCTION__);
+    return len;
+  } else {
+    CSFLogError(logTag, "%s RTCP Packet Send Failed ", __FUNCTION__);
+    return -1;
+  }
+}
+
+/**
+ * Converts between CodecConfig to WebRTC Codec Structure.
+ * Returns false (leaving |cinst| zeroed) when the codec name does not fit
+ * in CodecInst::plname including its NUL terminator.
+ */
+
+bool
+WebrtcAudioConduit::CodecConfigToWebRTCCodec(const AudioCodecConfig* codecInfo,
+                                             webrtc::CodecInst& cinst)
+{
+  const unsigned int plNameLength = codecInfo->mName.length();
+  memset(&cinst, 0, sizeof(webrtc::CodecInst));
+  if(sizeof(cinst.plname) < plNameLength+1)
+  {
+    CSFLogError(logTag, "%s Payload name buffer capacity mismatch ",
+                __FUNCTION__);
+    return false;
+  }
+  memcpy(cinst.plname, codecInfo->mName.c_str(), plNameLength);
+  cinst.plname[plNameLength]='\0';
+  cinst.pltype = codecInfo->mType;
+  cinst.rate = codecInfo->mRate;
+  cinst.pacsize = codecInfo->mPacSize;
+  cinst.plfreq = codecInfo->mFreq;
+  if (codecInfo->mName == "G722") {
+    // Compensate for G.722 spec error in RFC 1890
+    cinst.plfreq = 16000;
+  }
+  cinst.channels = codecInfo->mChannels;
+  return true;
+}
+
+/**
+ * Supported sampling frequencies: true when the given rate maps onto a
+ * known 10ms block size.
+ */
+bool
+WebrtcAudioConduit::IsSamplingFreqSupported(int freq) const
+{
+  return GetNum10msSamplesForFrequency(freq) != 0;
+}
+
+/* Return block-length of 10 ms audio frame in number of samples */
+unsigned int
+WebrtcAudioConduit::GetNum10msSamplesForFrequency(int samplingFreqHz) const
+{
+  // 10ms of audio is rate/100 samples (160 @ 16kHz, 320 @ 32kHz,
+  // 441 @ 44.1kHz, 480 @ 48kHz); unsupported rates yield 0.
+  if (samplingFreqHz == 16000 || samplingFreqHz == 32000 ||
+      samplingFreqHz == 44100 || samplingFreqHz == 48000) {
+    return samplingFreqHz / 100;
+  }
+  return 0; // invalid or unsupported
+}
+
+//Copy the codec passed into Conduit's database
+// Stores a heap-allocated copy of |codecInfo| in mRecvCodecList.
+// NOTE(review): the list holds raw owning pointers; presumably they are
+// released elsewhere (e.g. on teardown) -- confirm to rule out a leak.
+bool
+WebrtcAudioConduit::CopyCodecToDB(const AudioCodecConfig* codecInfo)
+{
+
+  AudioCodecConfig* cdcConfig = new AudioCodecConfig(codecInfo->mType,
+                                                     codecInfo->mName,
+                                                     codecInfo->mFreq,
+                                                     codecInfo->mPacSize,
+                                                     codecInfo->mChannels,
+                                                     codecInfo->mRate,
+                                                     codecInfo->mFECEnabled);
+  mRecvCodecList.push_back(cdcConfig);
+  return true;
+}
+
+/**
+ * Compares two codec configs field by field (the FEC flag is deliberately
+ * not part of the comparison).  A null current config never matches.
+ */
+bool
+WebrtcAudioConduit::CheckCodecsForMatch(const AudioCodecConfig* curCodecConfig,
+                                        const AudioCodecConfig* codecInfo) const
+{
+  if (!curCodecConfig) {
+    return false;
+  }
+
+  return curCodecConfig->mType == codecInfo->mType &&
+         curCodecConfig->mName == codecInfo->mName &&
+         curCodecConfig->mFreq == codecInfo->mFreq &&
+         curCodecConfig->mPacSize == codecInfo->mPacSize &&
+         curCodecConfig->mChannels == codecInfo->mChannels &&
+         curCodecConfig->mRate == codecInfo->mRate;
+}
+
+/**
+ * Returns true when an equivalent codec is already present in the
+ * conduit's receive-codec database.
+ */
+bool
+WebrtcAudioConduit::CheckCodecForMatch(const AudioCodecConfig* codecInfo) const
+{
+  for (AudioCodecConfig* existing : mRecvCodecList) {
+    if (CheckCodecsForMatch(existing, codecInfo)) {
+      return true;
+    }
+  }
+  // no match, or the local db is empty
+  return false;
+}
+
+
+/**
+ * Perform validation on the codecConfig to be applied.
+ * Verifies if the codec is already applied.
+ * @param codecInfo non-null config with a sane name length and 1-2 channels
+ * @param send true => compare against the current send codec (under
+ *             mCodecMutex); false => compare against the receive database
+ * @return kMediaConduitMalformedArgument on invalid input; otherwise
+ *         kMediaConduitNoError.  A duplicate codec is only logged,
+ *         not treated as an error.
+ */
+MediaConduitErrorCode
+WebrtcAudioConduit::ValidateCodecConfig(const AudioCodecConfig* codecInfo,
+                                        bool send)
+{
+  bool codecAppliedAlready = false;
+
+  if(!codecInfo)
+  {
+    CSFLogError(logTag, "%s Null CodecConfig ", __FUNCTION__);
+    return kMediaConduitMalformedArgument;
+  }
+
+  if((codecInfo->mName.empty()) ||
+     (codecInfo->mName.length() >= CODEC_PLNAME_SIZE))
+  {
+    CSFLogError(logTag, "%s Invalid Payload Name Length ", __FUNCTION__);
+    return kMediaConduitMalformedArgument;
+  }
+
+  //Only mono or stereo channels supported
+  if( (codecInfo->mChannels != 1) && (codecInfo->mChannels != 2))
+  {
+    CSFLogError(logTag, "%s Channel Unsupported ", __FUNCTION__);
+    return kMediaConduitMalformedArgument;
+  }
+
+  //check if we have the same codec already applied
+  if(send)
+  {
+    MutexAutoLock lock(mCodecMutex);
+
+    codecAppliedAlready = CheckCodecsForMatch(mCurSendCodecConfig,codecInfo);
+  } else {
+    codecAppliedAlready = CheckCodecForMatch(codecInfo);
+  }
+
+  if(codecAppliedAlready)
+  {
+    CSFLogDebug(logTag, "%s Codec %s Already Applied ", __FUNCTION__, codecInfo->mName.c_str());
+  }
+  return kMediaConduitNoError;
+}
+
+// Debug aid: logs every entry in the receive-codec database.
+void
+WebrtcAudioConduit::DumpCodecDB() const
+{
+  for (AudioCodecConfig* codec : mRecvCodecList) {
+    CSFLogDebug(logTag,"Payload Name: %s", codec->mName.c_str());
+    CSFLogDebug(logTag,"Payload Type: %d", codec->mType);
+    CSFLogDebug(logTag,"Payload Frequency: %d", codec->mFreq);
+    CSFLogDebug(logTag,"Payload PacketSize: %d", codec->mPacSize);
+    CSFLogDebug(logTag,"Payload Channels: %d", codec->mChannels);
+    CSFLogDebug(logTag,"Payload Sampling Rate: %d", codec->mRate);
+  }
+}
+}// end namespace
diff --git a/media/webrtc/signaling/src/media-conduit/AudioConduit.h b/media/webrtc/signaling/src/media-conduit/AudioConduit.h
new file mode 100755
index 000000000..228736dcc
--- /dev/null
+++ b/media/webrtc/signaling/src/media-conduit/AudioConduit.h
@@ -0,0 +1,304 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+
+#ifndef AUDIO_SESSION_H_
+#define AUDIO_SESSION_H_
+
+#include "mozilla/Attributes.h"
+#include "mozilla/TimeStamp.h"
+#include "nsTArray.h"
+
+#include "MediaConduitInterface.h"
+#include "MediaEngineWrapper.h"
+
+// Audio Engine Includes
+#include "webrtc/common_types.h"
+#include "webrtc/voice_engine/include/voe_base.h"
+#include "webrtc/voice_engine/include/voe_volume_control.h"
+#include "webrtc/voice_engine/include/voe_codec.h"
+#include "webrtc/voice_engine/include/voe_file.h"
+#include "webrtc/voice_engine/include/voe_network.h"
+#include "webrtc/voice_engine/include/voe_external_media.h"
+#include "webrtc/voice_engine/include/voe_audio_processing.h"
+#include "webrtc/voice_engine/include/voe_video_sync.h"
+#include "webrtc/voice_engine/include/voe_rtp_rtcp.h"
+//Some WebRTC types for short notations
+ using webrtc::VoEBase;
+ using webrtc::VoENetwork;
+ using webrtc::VoECodec;
+ using webrtc::VoEExternalMedia;
+ using webrtc::VoEAudioProcessing;
+ using webrtc::VoEVideoSync;
+ using webrtc::VoERTP_RTCP;
+/** This file hosts several structures identifying different aspects
+ * of a RTP Session.
+ */
+namespace mozilla {
+// Helper function
+
+DOMHighResTimeStamp
+NTPtoDOMHighResTimeStamp(uint32_t ntpHigh, uint32_t ntpLow);
+
+/**
+ * Concrete class for Audio session. Hooks up
+ * - media-source and target to external transport
+ */
+class WebrtcAudioConduit:public AudioSessionConduit
+                        ,public webrtc::Transport
+{
+public:
+  //VoiceEngine defined constant for Payload Name Size.
+  static const unsigned int CODEC_PLNAME_SIZE;
+
+  /**
+   * APIs used by the registered external transport to this Conduit to
+   * feed in received RTP Frames to the VoiceEngine for decoding
+   */
+  virtual MediaConduitErrorCode ReceivedRTPPacket(const void *data, int len) override;
+
+  /**
+   * APIs used by the registered external transport to this Conduit to
+   * feed in received RTCP Frames to the VoiceEngine for decoding
+   */
+  virtual MediaConduitErrorCode ReceivedRTCPPacket(const void *data, int len) override;
+
+  virtual MediaConduitErrorCode StopTransmitting() override;
+  virtual MediaConduitErrorCode StartTransmitting() override;
+  virtual MediaConduitErrorCode StopReceiving() override;
+  virtual MediaConduitErrorCode StartReceiving() override;
+
+  /**
+   * Function to configure send codec for the audio session
+   * @param codecConfig: CodecConfiguration
+   * @result: On Success, the audio engine is configured with passed in codec for send
+   *          On failure, audio engine transmit functionality is disabled.
+   * NOTE: This API can be invoked multiple times. Invoking this API may involve
+   *       restarting the transmission sub-system on the engine.
+   */
+  virtual MediaConduitErrorCode ConfigureSendMediaCodec(const AudioCodecConfig* codecConfig) override;
+  /**
+   * Function to configure list of receive codecs for the audio session
+   * @param codecConfigList: list of codec configurations to accept
+   * @result: On Success, the audio engine is configured with the passed-in codecs
+   *          and playout is enabled.
+   *          On failure, audio engine receive functionality is disabled.
+   * NOTE: This API can be invoked multiple times. Invoking this API may involve
+   *       restarting the reception sub-system on the engine.
+   */
+  virtual MediaConduitErrorCode ConfigureRecvMediaCodecs(
+    const std::vector<AudioCodecConfig* >& codecConfigList) override;
+  /**
+   * Function to enable the audio level extension
+   * @param enabled: enable extension
+   * @param id: RTP header extension id to use
+   */
+  virtual MediaConduitErrorCode EnableAudioLevelExtension(bool enabled, uint8_t id) override;
+
+  /**
+   * Register External Transport to this Conduit. RTP and RTCP frames from the VoiceEngine
+   * shall be passed to the registered transport for transporting externally.
+   */
+  virtual MediaConduitErrorCode SetTransmitterTransport(RefPtr<TransportInterface> aTransport) override;
+
+  virtual MediaConduitErrorCode SetReceiverTransport(RefPtr<TransportInterface> aTransport) override;
+
+  /**
+   * Function to deliver externally captured audio sample for encoding and transport
+   * @param speechData [in]: Pointer to array containing a frame of audio
+   * @param lengthSamples [in]: Length of audio frame in samples in multiple of 10 milliseconds
+   *                            Ex: Frame length is 160, 320, 441 for 16, 32, 44.1 kHz
+   *                            sampling rates respectively.
+   *                            speechData[] should be of lengthSamples in size;
+   *                            say, for a 16kHz sampling rate, speechData[] should contain
+   *                            160 samples of 16-bits each for a 10ms audio frame.
+   * @param samplingFreqHz [in]: Frequency/rate of the sampling in Hz ( 16000, 32000 ...)
+   * @param capture_time [in]: Approx delay from recording until it is delivered to
+   *                           VoiceEngine in milliseconds.
+   * NOTE: ConfigureSendMediaCodec() SHOULD be called before this function can be invoked
+   *       This ensures the inserted audio-samples can be transmitted by the conduit
+   *
+   */
+  virtual MediaConduitErrorCode SendAudioFrame(const int16_t speechData[],
+                                               int32_t lengthSamples,
+                                               int32_t samplingFreqHz,
+                                               int32_t capture_time) override;
+
+  /**
+   * Function to grab a decoded audio-sample from the media engine for rendering
+   * / playout of length 10 milliseconds.
+   *
+   * @param speechData [in]: Pointer to an array to which a 10ms frame of audio will be copied
+   * @param samplingFreqHz [in]: Frequency of the sampling for playback in Hertz (16000, 32000,..)
+   * @param capture_delay [in]: Estimated time between reading of the samples and
+   *                            rendering/playback
+   * @param lengthSamples [out]: Will contain length of the audio frame in samples at return.
+   *                             Ex: A value of 160 implies 160 samples each of 16-bits was
+   *                             copied into speechData
+   * NOTE: This function should be invoked every 10 milliseconds for the best
+   *       performance
+   * NOTE: ConfigureRecvMediaCodec() SHOULD be called before this function can be invoked
+   *       This ensures the decoded samples are ready for reading and playout is enabled.
+   *
+   */
+  virtual MediaConduitErrorCode GetAudioFrame(int16_t speechData[],
+                                              int32_t samplingFreqHz,
+                                              int32_t capture_delay,
+                                              int& lengthSamples) override;
+
+
+  /**
+   * Webrtc transport implementation to send and receive RTP packet.
+   * AudioConduit registers itself as ExternalTransport to the VoiceEngine
+   */
+  virtual int SendPacket(int channel, const void *data, size_t len) override;
+
+  /**
+   * Webrtc transport implementation to send and receive RTCP packet.
+   * AudioConduit registers itself as ExternalTransport to the VoiceEngine
+   */
+  virtual int SendRTCPPacket(int channel, const void *data, size_t len) override;
+
+
+  // No external codec plugins for audio; always reports 0.
+  virtual uint64_t CodecPluginID() override { return 0; }
+
+  WebrtcAudioConduit():
+    mVoiceEngine(nullptr),
+    mTransportMonitor("WebrtcAudioConduit"),
+    mTransmitterTransport(nullptr),
+    mReceiverTransport(nullptr),
+    mEngineTransmitting(false),
+    mEngineReceiving(false),
+    mChannel(-1),
+    mDtmfEnabled(false),
+    mCodecMutex("AudioConduit codec db"),
+    mCaptureDelay(150),
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+    mLastTimestamp(0),
+#endif // !MOZILLA_EXTERNAL_LINKAGE
+    mSamples(0),
+    mLastSyncLog(0)
+  {
+  }
+
+  virtual ~WebrtcAudioConduit();
+
+  MediaConduitErrorCode Init();
+
+  int GetChannel() { return mChannel; }
+  webrtc::VoiceEngine* GetVoiceEngine() { return mVoiceEngine; }
+  bool SetLocalSSRC(unsigned int ssrc) override;
+  bool GetLocalSSRC(unsigned int* ssrc) override;
+  bool GetRemoteSSRC(unsigned int* ssrc) override;
+  bool SetLocalCNAME(const char* cname) override;
+  // Video statistics do not apply to an audio conduit; always report false.
+  bool GetVideoEncoderStats(double* framerateMean,
+                            double* framerateStdDev,
+                            double* bitrateMean,
+                            double* bitrateStdDev,
+                            uint32_t* droppedFrames) override
+  {
+    return false;
+  }
+  bool GetVideoDecoderStats(double* framerateMean,
+                            double* framerateStdDev,
+                            double* bitrateMean,
+                            double* bitrateStdDev,
+                            uint32_t* discardedPackets) override
+  {
+    return false;
+  }
+  bool GetAVStats(int32_t* jitterBufferDelayMs,
+                  int32_t* playoutBufferDelayMs,
+                  int32_t* avSyncOffsetMs) override;
+  bool GetRTPStats(unsigned int* jitterMs, unsigned int* cumulativeLost) override;
+  bool GetRTCPReceiverReport(DOMHighResTimeStamp* timestamp,
+                             uint32_t* jitterMs,
+                             uint32_t* packetsReceived,
+                             uint64_t* bytesReceived,
+                             uint32_t *cumulativeLost,
+                             int32_t* rttMs) override;
+  bool GetRTCPSenderReport(DOMHighResTimeStamp* timestamp,
+                           unsigned int* packetsSent,
+                           uint64_t* bytesSent) override;
+
+  bool SetDtmfPayloadType(unsigned char type) override;
+
+  bool InsertDTMFTone(int channel, int eventCode, bool outOfBand,
+                      int lengthMs, int attenuationDb) override;
+
+private:
+  WebrtcAudioConduit(const WebrtcAudioConduit& other) = delete;
+  void operator=(const WebrtcAudioConduit& other) = delete;
+
+  //Local database of currently applied receive codecs
+  typedef std::vector<AudioCodecConfig* > RecvCodecList;
+
+  //Function to convert between WebRTC and Conduit codec structures
+  bool CodecConfigToWebRTCCodec(const AudioCodecConfig* codecInfo,
+                                webrtc::CodecInst& cinst);
+
+  //Checks if given sampling frequency is supported
+  bool IsSamplingFreqSupported(int freq) const;
+
+  //Generate block size in sample length for a given sampling frequency
+  unsigned int GetNum10msSamplesForFrequency(int samplingFreqHz) const;
+
+  // Function to copy a codec structure to Conduit's database
+  bool CopyCodecToDB(const AudioCodecConfig* codecInfo);
+
+  // Functions to verify if the codec passed is already in
+  // conduits database
+  bool CheckCodecForMatch(const AudioCodecConfig* codecInfo) const;
+  bool CheckCodecsForMatch(const AudioCodecConfig* curCodecConfig,
+                           const AudioCodecConfig* codecInfo) const;
+  //Checks the codec to be applied
+  MediaConduitErrorCode ValidateCodecConfig(const AudioCodecConfig* codecInfo, bool send);
+
+  //Utility function to dump recv codec database
+  void DumpCodecDB() const;
+
+  webrtc::VoiceEngine* mVoiceEngine;
+  mozilla::ReentrantMonitor mTransportMonitor;
+  RefPtr<TransportInterface> mTransmitterTransport;
+  RefPtr<TransportInterface> mReceiverTransport;
+  ScopedCustomReleasePtr<webrtc::VoENetwork> mPtrVoENetwork;
+  ScopedCustomReleasePtr<webrtc::VoEBase> mPtrVoEBase;
+  ScopedCustomReleasePtr<webrtc::VoECodec> mPtrVoECodec;
+  ScopedCustomReleasePtr<webrtc::VoEExternalMedia> mPtrVoEXmedia;
+  ScopedCustomReleasePtr<webrtc::VoEAudioProcessing> mPtrVoEProcessing;
+  ScopedCustomReleasePtr<webrtc::VoEVideoSync> mPtrVoEVideoSync;
+  ScopedCustomReleasePtr<webrtc::VoERTP_RTCP> mPtrVoERTP_RTCP;
+  ScopedCustomReleasePtr<webrtc::VoERTP_RTCP> mPtrRTP;
+  //engine states of our interest
+  mozilla::Atomic<bool> mEngineTransmitting; // If true => VoiceEngine Send-subsystem is up
+  mozilla::Atomic<bool> mEngineReceiving;    // If true => VoiceEngine Receive-subsystem is up
+                                             // and playout is enabled
+  // Keep track of each inserted RTP block and the time it was inserted
+  // so we can estimate the clock time for a specific TimeStamp coming out
+  // (for when we send data to MediaStreamTracks). Blocks are aged out as needed.
+  struct Processing {
+    TimeStamp mTimeStamp;
+    uint32_t mRTPTimeStamp; // RTP timestamps received
+  };
+  AutoTArray<Processing,8> mProcessing;
+
+  int mChannel;
+  bool mDtmfEnabled;
+  RecvCodecList mRecvCodecList;
+
+  Mutex mCodecMutex; // protects mCurSendCodecConfig
+  nsAutoPtr<AudioCodecConfig> mCurSendCodecConfig;
+
+  // Current "capture" delay (really output plus input delay)
+  int32_t mCaptureDelay;
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+  uint32_t mLastTimestamp; // last playout timestamp seen in GetAudioFrame()
+#endif // !MOZILLA_EXTERNAL_LINKAGE
+
+  uint32_t mSamples;     // total samples handed out by GetAudioFrame()
+  uint32_t mLastSyncLog; // mSamples value at the last A/V-sync log
+};
+
+} // end namespace
+
+#endif
diff --git a/media/webrtc/signaling/src/media-conduit/CodecConfig.h b/media/webrtc/signaling/src/media-conduit/CodecConfig.h
new file mode 100755
index 000000000..308c97948
--- /dev/null
+++ b/media/webrtc/signaling/src/media-conduit/CodecConfig.h
@@ -0,0 +1,166 @@
+
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef CODEC_CONFIG_H_
+#define CODEC_CONFIG_H_
+
+#include <string>
+#include <vector>
+
+#include "signaling/src/common/EncodingConstraints.h"
+
+namespace mozilla {
+
+/**
+ * Minimalistic Audio Codec Config Params
+ */
+struct AudioCodecConfig
+{
+  /*
+   * The data-types for these properties mimic the
+   * corresponding webrtc::CodecInst data-types.
+   */
+  int mType;          // payload type (maps to CodecInst::pltype)
+  std::string mName;  // codec name (maps to CodecInst::plname)
+  int mFreq;          // sampling frequency in Hz (maps to CodecInst::plfreq)
+  int mPacSize;       // packet size in samples (maps to CodecInst::pacsize)
+  int mChannels;      // 1 = mono, 2 = stereo
+  int mRate;          // rate (maps to CodecInst::rate)
+
+  bool mFECEnabled;
+  bool mDtmfEnabled;  // always false at construction; set by callers
+
+  // OPUS-specific
+  int mMaxPlaybackRate;
+
+  /* Default constructor is not provided since as a consumer, we
+   * can't decide the default configuration for the codec
+   */
+  explicit AudioCodecConfig(int type, std::string name,
+                            int freq, int pacSize,
+                            int channels, int rate, bool FECEnabled)
+    : mType(type),
+      mName(name),
+      mFreq(freq),
+      mPacSize(pacSize),
+      mChannels(channels),
+      mRate(rate),
+      mFECEnabled(FECEnabled),
+      mDtmfEnabled(false),
+      mMaxPlaybackRate(0)
+  {
+  }
+};
+
+/*
+ * Minimalistic video codec configuration
+ * More to be added later depending on the use-case
+ */
+
+#define MAX_SPROP_LEN 128
+
+// used for holding SDP negotiation results
+// used for holding SDP negotiation results
+struct VideoCodecConfigH264
+{
+  char sprop_parameter_sets[MAX_SPROP_LEN]; // sprop-parameter-sets fmtp value
+  int packetization_mode;                   // H.264 packetization-mode fmtp value
+  int profile_level_id;                     // 24-bit profile-level-id, laid out 0xPPCCLL
+  int tias_bw;                              // b=TIAS bandwidth from SDP -- presumably bps; confirm
+};
+
+
+// class so the std::strings can get freed more easily/reliably
+class VideoCodecConfig
+{
+public:
+ /*
+ * The data-types for these properties mimic the
+ * corresponding webrtc::VideoCodec data-types.
+ */
+ int mType; // payload type
+ std::string mName;
+
+ std::vector<std::string> mAckFbTypes;
+ std::vector<std::string> mNackFbTypes;
+ std::vector<std::string> mCcmFbTypes;
+ // Don't pass mOtherFbTypes from JsepVideoCodecDescription because we'd have
+ // to drag SdpRtcpFbAttributeList::Feedback along too.
+ bool mRembFbSet;
+ bool mFECFbSet;
+
+ EncodingConstraints mEncodingConstraints;
+ struct SimulcastEncoding {
+ std::string rid;
+ EncodingConstraints constraints;
+ };
+ std::vector<SimulcastEncoding> mSimulcastEncodings;
+ std::string mSpropParameterSets;
+ uint8_t mProfile;
+ uint8_t mConstraints;
+ uint8_t mLevel;
+ uint8_t mPacketizationMode;
+ // TODO: add external negotiated SPS/PPS
+
+ VideoCodecConfig(int type,
+ std::string name,
+ const EncodingConstraints& constraints,
+ const struct VideoCodecConfigH264 *h264 = nullptr) :
+ mType(type),
+ mName(name),
+ mFECFbSet(false),
+ mEncodingConstraints(constraints),
+ mProfile(0x42),
+ mConstraints(0xE0),
+ mLevel(0x0C),
+ mPacketizationMode(1)
+ {
+ if (h264) {
+ mProfile = (h264->profile_level_id & 0x00FF0000) >> 16;
+ mConstraints = (h264->profile_level_id & 0x0000FF00) >> 8;
+ mLevel = (h264->profile_level_id & 0x000000FF);
+ mPacketizationMode = h264->packetization_mode;
+ mSpropParameterSets = h264->sprop_parameter_sets;
+ }
+ }
+
+ // Nothing seems to use this right now. Do we intend to support this
+ // someday?
+ bool RtcpFbAckIsSet(const std::string& type) const
+ {
+ for (auto i = mAckFbTypes.begin(); i != mAckFbTypes.end(); ++i) {
+ if (*i == type) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ bool RtcpFbNackIsSet(const std::string& type) const
+ {
+ for (auto i = mNackFbTypes.begin(); i != mNackFbTypes.end(); ++i) {
+ if (*i == type) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ bool RtcpFbCcmIsSet(const std::string& type) const
+ {
+ for (auto i = mCcmFbTypes.begin(); i != mCcmFbTypes.end(); ++i) {
+ if (*i == type) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ bool RtcpFbRembIsSet() const { return mRembFbSet; }
+
+ bool RtcpFbFECIsSet() const { return mFECFbSet; }
+
+};
+}
+#endif
diff --git a/media/webrtc/signaling/src/media-conduit/CodecStatistics.cpp b/media/webrtc/signaling/src/media-conduit/CodecStatistics.cpp
new file mode 100644
index 000000000..eb03c0bf8
--- /dev/null
+++ b/media/webrtc/signaling/src/media-conduit/CodecStatistics.cpp
@@ -0,0 +1,183 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "CodecStatistics.h"
+
+#include "CSFLog.h"
+#include "mozilla/Telemetry.h"
+
+using namespace mozilla;
+using namespace webrtc;
+
+// use the same tag as VideoConduit
+static const char* logTag ="WebrtcVideoSessionConduit";
+
+// Binds the statistics collector to a ViE channel.  Observer registration
+// is deferred to Register(); |codec| must be non-null and outlive this
+// object (asserted below).
+VideoCodecStatistics::VideoCodecStatistics(int channel,
+                                           ViECodec* codec) :
+  mChannel(channel),
+  mSentRawFrames(0),
+  mPtrViECodec(codec),
+  mEncoderDroppedFrames(0),
+  mDecoderDiscardedPackets(0),
+  mRegisteredEncode(false),
+  mRegisteredDecode(false),
+  mReceiveState(kReceiveStateInitial)
+#ifdef MOZILLA_INTERNAL_API
+  , mRecoveredBeforeLoss(0)
+  , mRecoveredLosses(0)
+#endif
+{
+  MOZ_ASSERT(mPtrViECodec);
+}
+
+// Deregisters any observers added via Register() so the ViE codec
+// interface cannot call back into a destroyed object.
+VideoCodecStatistics::~VideoCodecStatistics()
+{
+  if (mRegisteredEncode) {
+    mPtrViECodec->DeregisterEncoderObserver(mChannel);
+  }
+  if (mRegisteredDecode) {
+    mPtrViECodec->DeregisterDecoderObserver(mChannel);
+  }
+}
+
+// Attaches this object as encoder (encoder == true) or decoder observer
+// on the channel.  Registration is idempotent per direction.
+void VideoCodecStatistics::Register(bool encoder)
+{
+  if (encoder) {
+    if (!mRegisteredEncode) {
+      mPtrViECodec->RegisterEncoderObserver(mChannel, *this);
+      mRegisteredEncode = true;
+    }
+    return;
+  }
+
+  if (!mRegisteredDecode) {
+    mPtrViECodec->RegisterDecoderObserver(mChannel, *this);
+    mRegisteredDecode = true;
+  }
+}
+
+// ViEEncoderObserver callback: the encoder produced |framerate|/|bitrate|
+// on |video_channel|.  Tracks running stats and estimates dropped frames
+// as raw frames handed to the encoder minus frames it actually emitted.
+void VideoCodecStatistics::OutgoingRate(const int video_channel,
+                                        const uint32_t framerate,
+                                        const uint32_t bitrate)
+{
+  unsigned int keyFrames, deltaFrames;
+  mPtrViECodec->GetSendCodecStatistics(video_channel, keyFrames, deltaFrames);
+  // NOTE(review): unsigned subtraction -- if keyFrames+deltaFrames ever
+  // exceeds mSentRawFrames this wraps to a huge value; confirm both
+  // counters share the same epoch.
+  uint32_t dropped = mSentRawFrames - (keyFrames + deltaFrames);
+  CSFLogDebug(logTag,
+              "encoder statistics - framerate: %u, bitrate: %u, dropped frames: %u",
+              framerate, bitrate, dropped);
+  mEncoderBitRate.Push(bitrate);
+  mEncoderFps.Push(framerate);
+  mEncoderDroppedFrames += dropped;
+}
+
+// ViEDecoderObserver callback: the incoming stream switched codecs.
+// Log-only; no statistics are updated here.
+void VideoCodecStatistics::IncomingCodecChanged(const int video_channel,
+                                                const VideoCodec& video_codec)
+{
+  CSFLogDebug(logTag,
+              "channel %d change codec to \"%s\" ",
+              video_channel, video_codec.plName);
+}
+
+// ViEDecoderObserver callback: the decoder reported |framerate|/|bitrate|.
+// Tracks running stats and accumulates the discarded-packet count.
+void VideoCodecStatistics::IncomingRate(const int video_channel,
+                                        const unsigned int framerate,
+                                        const unsigned int bitrate)
+{
+  unsigned int discarded = mPtrViECodec->GetDiscardedPackets(video_channel);
+  CSFLogDebug(logTag,
+              "decoder statistics - framerate: %u, bitrate: %u, discarded packets %u",
+              framerate, bitrate, discarded);
+  mDecoderBitRate.Push(bitrate);
+  mDecoderFps.Push(framerate);
+  // NOTE(review): if GetDiscardedPackets() returns a cumulative total,
+  // the += here double-counts across callbacks -- confirm its semantics.
+  mDecoderDiscardedPackets += discarded;
+}
+
+// Tracks transitions of the decoder receive-state machine and derives
+// error/recovery telemetry: entering a non-normal state (other than
+// PreemptiveNACK) starts an error interval; returning to Normal/Initial
+// ends it, counting either a NACK-avoided loss or a recovery with its
+// duration fed into Telemetry.
+void VideoCodecStatistics::ReceiveStateChange(const int aChannel,
+                                              VideoReceiveState aState)
+{
+  CSFLogDebug(logTag,"New state for %d: %d (was %d)", aChannel, aState, mReceiveState);
+#ifdef MOZILLA_INTERNAL_API
+  if (mFirstDecodeTime.IsNull()) {
+    mFirstDecodeTime = TimeStamp::Now();
+  }
+  /*
+   * Invalid transitions:
+   * WaitingKey -> PreemptiveNACK
+   * DecodingWithErrors -> PreemptiveNACK
+   */
+
+  switch (mReceiveState) {
+    case kReceiveStateNormal:
+    case kReceiveStateInitial:
+      // in a normal state
+      if (aState != kReceiveStateNormal && aState != kReceiveStateInitial) {
+        // no longer in a normal state
+        if (aState != kReceiveStatePreemptiveNACK) {
+          mReceiveFailureTime = TimeStamp::Now();
+        }
+      } // else Normal<->Initial transition
+      break;
+    default:
+      // not in a normal state
+      if (aState == kReceiveStateNormal || aState == kReceiveStateInitial) {
+
+        if (mReceiveState == kReceiveStatePreemptiveNACK) {
+          mRecoveredBeforeLoss++;
+          CSFLogError(logTag, "Video error avoided by NACK recovery");
+        } else if (!mReceiveFailureTime.IsNull()) { // safety
+          TimeDuration timeDelta = TimeStamp::Now() - mReceiveFailureTime;
+          CSFLogError(logTag, "Video error duration: %u ms",
+                      static_cast<uint32_t>(timeDelta.ToMilliseconds()));
+          Telemetry::Accumulate(Telemetry::WEBRTC_VIDEO_ERROR_RECOVERY_MS,
+                                static_cast<uint32_t>(timeDelta.ToMilliseconds()));
+
+          mRecoveredLosses++; // to calculate losses per minute
+          mTotalLossTime += timeDelta;  // To calculate % time in recovery
+        }
+      } // else non-Normal to different non-normal transition
+      break;
+  }
+
+#endif
+
+  mReceiveState = aState;
+}
+
+// Emits per-call summary telemetry (NACK recoveries per minute, errors
+// per minute, and the permille of call time spent in error) once any
+// decoding has happened; no-op otherwise.
+void VideoCodecStatistics::EndOfCallStats()
+{
+#ifdef MOZILLA_INTERNAL_API
+  if (!mFirstDecodeTime.IsNull()) {
+    TimeDuration callDelta = TimeStamp::Now() - mFirstDecodeTime;
+    if (callDelta.ToSeconds() != 0) {
+      uint32_t recovered_per_min = mRecoveredBeforeLoss/(callDelta.ToSeconds()/60);
+      CSFLogError(logTag, "Video recovery before error per min %u", recovered_per_min);
+      Telemetry::Accumulate(Telemetry::WEBRTC_VIDEO_RECOVERY_BEFORE_ERROR_PER_MIN,
+                            recovered_per_min);
+      uint32_t err_per_min = mRecoveredLosses/(callDelta.ToSeconds()/60);
+      CSFLogError(logTag, "Video recovery after error per min %u", err_per_min);
+      Telemetry::Accumulate(Telemetry::WEBRTC_VIDEO_RECOVERY_AFTER_ERROR_PER_MIN,
+                            err_per_min);
+      float percent = (mTotalLossTime.ToSeconds()*100)/callDelta.ToSeconds();
+      CSFLogError(logTag, "Video error time percentage %f%%", percent);
+      Telemetry::Accumulate(Telemetry::WEBRTC_VIDEO_DECODE_ERROR_TIME_PERMILLE,
+                            static_cast<uint32_t>(percent*10));
+    }
+  }
+#endif
+}
+
+// Counts one raw frame handed to the encoder; compared in OutgoingRate()
+// against the encoder's key+delta output to estimate dropped frames.
+void VideoCodecStatistics::SentFrame()
+{
+  ++mSentRawFrames;
+}
+
+// Dumps mean/variance/stddev for all four running statistics.
+void VideoCodecStatistics::Dump()
+{
+  Dump(mEncoderBitRate, "encoder bitrate");
+  Dump(mEncoderFps, "encoder fps");
+  Dump(mDecoderBitRate, "decoder bitrate");
+  Dump(mDecoderFps, "decoder fps");
+}
+
+// Logs summary statistics for one RunningStat under the given label.
+void VideoCodecStatistics::Dump(RunningStat& s, const char *name)
+{
+  CSFLogDebug(logTag,
+              "%s, mean: %f, variance: %f, standard deviation: %f",
+              name, s.Mean(), s.Variance(), s.StandardDeviation());
+}
diff --git a/media/webrtc/signaling/src/media-conduit/CodecStatistics.h b/media/webrtc/signaling/src/media-conduit/CodecStatistics.h
new file mode 100644
index 000000000..ab81a6f33
--- /dev/null
+++ b/media/webrtc/signaling/src/media-conduit/CodecStatistics.h
@@ -0,0 +1,111 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+#ifndef CODEC_STATISTICS_H_
+#define CODEC_STATISTICS_H_
+#include <math.h>
+
+#include "nsTArray.h"
+#include "nsISupportsImpl.h"
+#include "mozilla/TimeStamp.h"
+#include "webrtc/common_types.h"
+#include "webrtc/video_engine/include/vie_codec.h"
+#include "MediaEngineWrapper.h"
+#include "RunningStat.h"
+
+namespace mozilla {
+
+// Statistics-gathering observer for Video Encoder and Decoder
+
+class VideoCodecStatistics : public webrtc::ViEEncoderObserver
+ , public webrtc::ViEDecoderObserver
+{
+public:
+ VideoCodecStatistics(int channel, webrtc::ViECodec* vieCodec);
+ ~VideoCodecStatistics();
+ void Register(bool encoder);
+
+ void SentFrame();
+ virtual void OutgoingRate(const int video_channel,
+ const unsigned int framerate, const unsigned int bitrate) override;
+
+ virtual void IncomingCodecChanged(const int video_channel,
+ const webrtc::VideoCodec& video_codec) override;
+
+ virtual void IncomingRate(const int video_channel,
+ const unsigned int framerate,
+ const unsigned int bitrate) override;
+
+ void ReceiveStateChange(const int video_channel, webrtc::VideoReceiveState state) override;
+
+ void EndOfCallStats();
+
+ virtual void RequestNewKeyFrame(const int video_channel) override {};
+
+ virtual void SuspendChange(int video_channel, bool is_suspended) override {};
+ virtual void DecoderTiming(int decode_ms,
+ int max_decode_ms,
+ int current_delay_ms,
+ int target_delay_ms,
+ int jitter_buffer_ms,
+ int min_playout_delay_ms,
+ int render_delay_ms) override {}
+
+ bool GetEncoderStats(double* framerateMean,
+ double* framerateStdDev,
+ double* bitrateMean,
+ double* bitrateStdDev,
+ uint32_t* droppedFrames)
+ {
+ *framerateMean = mEncoderFps.Mean();
+ *framerateStdDev = mEncoderFps.StandardDeviation();
+ *bitrateMean = mEncoderBitRate.Mean();
+ *bitrateStdDev = mEncoderBitRate.StandardDeviation();
+ *droppedFrames = mEncoderDroppedFrames;
+ return true;
+ }
+
+ bool GetDecoderStats(double* framerateMean,
+ double* framerateStdDev,
+ double* bitrateMean,
+ double* bitrateStdDev,
+ uint32_t* discardedPackets)
+ {
+ *framerateMean = mDecoderFps.Mean();
+ *framerateStdDev = mDecoderFps.StandardDeviation();
+ *bitrateMean = mDecoderBitRate.Mean();
+ *bitrateStdDev = mDecoderBitRate.StandardDeviation();
+ *discardedPackets = mDecoderDiscardedPackets;
+ return true;
+ }
+
+ void Dump();
+private:
+ void Dump(RunningStat& s, const char *name);
+
+ int mChannel;
+ uint32_t mSentRawFrames;
+ ScopedCustomReleasePtr<webrtc::ViECodec> mPtrViECodec; // back-pointer
+
+ RunningStat mEncoderBitRate;
+ RunningStat mEncoderFps;
+ uint32_t mEncoderDroppedFrames;
+ RunningStat mDecoderBitRate;
+ RunningStat mDecoderFps;
+ uint32_t mDecoderDiscardedPackets;
+ bool mRegisteredEncode;
+ bool mRegisteredDecode;
+
+ webrtc::VideoReceiveState mReceiveState;
+#ifdef MOZILLA_INTERNAL_API
+ TimeStamp mFirstDecodeTime;
+ TimeStamp mReceiveFailureTime;
+ TimeDuration mTotalLossTime;
+ uint32_t mRecoveredBeforeLoss;
+ uint32_t mRecoveredLosses;
+#endif
+};
+
+}
+
+#endif //CODEC_STATISTICS_H_
diff --git a/media/webrtc/signaling/src/media-conduit/GmpVideoCodec.cpp b/media/webrtc/signaling/src/media-conduit/GmpVideoCodec.cpp
new file mode 100644
index 000000000..0c4d81e44
--- /dev/null
+++ b/media/webrtc/signaling/src/media-conduit/GmpVideoCodec.cpp
@@ -0,0 +1,18 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "WebrtcGmpVideoCodec.h"
+#include "GmpVideoCodec.h"
+
+namespace mozilla {
+
+VideoEncoder* GmpVideoCodec::CreateEncoder() {
+ return static_cast<VideoEncoder*>(new WebrtcVideoEncoderProxy());
+}
+
+VideoDecoder* GmpVideoCodec::CreateDecoder() {
+ return static_cast<VideoDecoder*>(new WebrtcVideoDecoderProxy());
+}
+
+}
diff --git a/media/webrtc/signaling/src/media-conduit/GmpVideoCodec.h b/media/webrtc/signaling/src/media-conduit/GmpVideoCodec.h
new file mode 100644
index 000000000..340150409
--- /dev/null
+++ b/media/webrtc/signaling/src/media-conduit/GmpVideoCodec.h
@@ -0,0 +1,19 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GMPVIDEOCODEC_H_
+#define GMPVIDEOCODEC_H_
+
+#include "MediaConduitInterface.h"
+
+namespace mozilla {
+class GmpVideoCodec {
+ public:
+ static VideoEncoder* CreateEncoder();
+ static VideoDecoder* CreateDecoder();
+};
+
+}
+
+#endif
diff --git a/media/webrtc/signaling/src/media-conduit/MediaCodecVideoCodec.cpp b/media/webrtc/signaling/src/media-conduit/MediaCodecVideoCodec.cpp
new file mode 100644
index 000000000..0c6c2fdde
--- /dev/null
+++ b/media/webrtc/signaling/src/media-conduit/MediaCodecVideoCodec.cpp
@@ -0,0 +1,31 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "CSFLog.h"
+#include "nspr.h"
+
+#include "WebrtcMediaCodecVP8VideoCodec.h"
+#include "MediaCodecVideoCodec.h"
+
+namespace mozilla {
+
+static const char* logTag ="MediaCodecVideoCodec";
+
+VideoEncoder* MediaCodecVideoCodec::CreateEncoder(CodecType aCodecType) {
+ CSFLogDebug(logTag, "%s ", __FUNCTION__);
+ if (aCodecType == CODEC_VP8) {
+ return new WebrtcMediaCodecVP8VideoEncoder();
+ }
+ return nullptr;
+}
+
+VideoDecoder* MediaCodecVideoCodec::CreateDecoder(CodecType aCodecType) {
+ CSFLogDebug(logTag, "%s ", __FUNCTION__);
+ if (aCodecType == CODEC_VP8) {
+ return new WebrtcMediaCodecVP8VideoDecoder();
+ }
+ return nullptr;
+}
+
+}
diff --git a/media/webrtc/signaling/src/media-conduit/MediaCodecVideoCodec.h b/media/webrtc/signaling/src/media-conduit/MediaCodecVideoCodec.h
new file mode 100644
index 000000000..50dde8211
--- /dev/null
+++ b/media/webrtc/signaling/src/media-conduit/MediaCodecVideoCodec.h
@@ -0,0 +1,31 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef MediaCodecVideoCodec_h__
+#define MediaCodecVideoCodec_h__
+
+#include "MediaConduitInterface.h"
+
+namespace mozilla {
+class MediaCodecVideoCodec {
+ public:
+ enum CodecType {
+ CODEC_VP8,
+ };
+ /**
+ * Create encoder object for codec type |aCodecType|. Return |nullptr| when
+ * failed.
+ */
+ static VideoEncoder* CreateEncoder(CodecType aCodecType);
+
+ /**
+ * Create decoder object for codec type |aCodecType|. Return |nullptr| when
+ * failed.
+ */
+ static VideoDecoder* CreateDecoder(CodecType aCodecType);
+};
+
+}
+
+#endif // MediaCodecVideoCodec_h__
diff --git a/media/webrtc/signaling/src/media-conduit/MediaConduitErrors.h b/media/webrtc/signaling/src/media-conduit/MediaConduitErrors.h
new file mode 100755
index 000000000..3709d59a0
--- /dev/null
+++ b/media/webrtc/signaling/src/media-conduit/MediaConduitErrors.h
@@ -0,0 +1,48 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+
+#ifndef MEDIA_SESSION_ERRORS_H_
+#define MEDIA_SESSION_ERRORS_H_
+
+namespace mozilla
+{
+enum MediaConduitErrorCode
+{
+kMediaConduitNoError = 0,              // 0 for Success, greater than 0 implies error
+kMediaConduitSessionNotInited = 10100, // Session not initialized.10100 serves as
+ // base for the conduit errors
+kMediaConduitMalformedArgument, // Malformed input to Conduit API
+kMediaConduitCaptureError, // WebRTC capture APIs failed
+kMediaConduitInvalidSendCodec, // Wrong Send codec
+kMediaConduitInvalidReceiveCodec, // Wrong Recv Codec
+kMediaConduitCodecInUse, // Already applied Codec
+kMediaConduitInvalidRenderer, // Null or Wrong Renderer object
+kMediaConduitRendererFail, // Add Render called multiple times
+kMediaConduitSendingAlready,           // Engine already transmitting
+kMediaConduitReceivingAlready, // Engine already receiving
+kMediaConduitTransportRegistrationFail,// Null or wrong transport interface
+kMediaConduitInvalidTransport, // Null or wrong transport interface
+kMediaConduitChannelError, // Configuration Error
+kMediaConduitSocketError, // Media Engine transport socket error
+kMediaConduitRTPRTCPModuleError, // Couldn't start RTP/RTCP processing
+kMediaConduitRTPProcessingFailed, // Processing incoming RTP frame failed
+kMediaConduitUnknownError, // More information can be found in logs
+kMediaConduitExternalRecordingError, // Couldn't start external recording
+kMediaConduitRecordingError, // Runtime recording error
+kMediaConduitExternalPlayoutError, // Couldn't start external playout
+kMediaConduitPlayoutError, // Runtime playout error
+kMediaConduitMTUError, // Can't set MTU
+kMediaConduitRTCPStatusError, // Can't set RTCP mode
+kMediaConduitKeyFrameRequestError, // Can't set KeyFrameRequest mode
+kMediaConduitNACKStatusError, // Can't set NACK mode
+kMediaConduitTMMBRStatusError, // Can't set TMMBR mode
+kMediaConduitFECStatusError, // Can't set FEC mode
+kMediaConduitHybridNACKFECStatusError // Can't set Hybrid NACK / FEC mode
+};
+
+}
+
+#endif
+
diff --git a/media/webrtc/signaling/src/media-conduit/MediaConduitInterface.h b/media/webrtc/signaling/src/media-conduit/MediaConduitInterface.h
new file mode 100755
index 000000000..05c34fea0
--- /dev/null
+++ b/media/webrtc/signaling/src/media-conduit/MediaConduitInterface.h
@@ -0,0 +1,495 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef MEDIA_CONDUIT_ABSTRACTION_
+#define MEDIA_CONDUIT_ABSTRACTION_
+
+#include "nsISupportsImpl.h"
+#include "nsXPCOM.h"
+#include "nsDOMNavigationTiming.h"
+#include "mozilla/RefPtr.h"
+#include "CodecConfig.h"
+#include "VideoTypes.h"
+#include "MediaConduitErrors.h"
+
+#include "ImageContainer.h"
+
+#include "webrtc/common_types.h"
+namespace webrtc {
+class I420VideoFrame;
+}
+
+#include <vector>
+
+namespace mozilla {
+/**
+ * Abstract Interface for transporting RTP packets - audio/video
+ * The consumers of this interface are responsible for passing in
+ * the RTPfied media packets
+ */
+class TransportInterface
+{
+protected:
+ virtual ~TransportInterface() {}
+
+public:
+ /**
+ * RTP Transport Function to be implemented by concrete transport implementation
+ * @param data : RTP Packet (audio/video) to be transported
+ * @param len : Length of the media packet
+ * @result : NS_OK on success, NS_ERROR_FAILURE otherwise
+ */
+ virtual nsresult SendRtpPacket(const void* data, int len) = 0;
+
+ /**
+ * RTCP Transport Function to be implemented by concrete transport implementation
+ * @param data : RTCP Packet to be transported
+ * @param len : Length of the RTCP packet
+ * @result : NS_OK on success, NS_ERROR_FAILURE otherwise
+ */
+ virtual nsresult SendRtcpPacket(const void* data, int len) = 0;
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(TransportInterface)
+};
+
+/**
+ * This class wraps image object for VideoRenderer::RenderVideoFrame()
+ * callback implementation to use for rendering.
+ */
+class ImageHandle
+{
+public:
+ explicit ImageHandle(layers::Image* image) : mImage(image) {}
+
+ const RefPtr<layers::Image>& GetImage() const { return mImage; }
+
+private:
+ RefPtr<layers::Image> mImage;
+};
+
+/**
+ * 1. Abstract renderer for video data
+ * 2. This class acts as abstract interface between the video-engine and
+ * video-engine agnostic renderer implementation.
+ * 3. Concrete implementation of this interface is responsible for
+ * processing and/or rendering the obtained raw video frame to appropriate
+ * output , say, <video>
+ */
+class VideoRenderer
+{
+protected:
+ virtual ~VideoRenderer() {}
+
+public:
+ /**
+   * Callback Function reporting any change in the video-frame dimensions
+ * @param width: current width of the video @ decoder
+ * @param height: current height of the video @ decoder
+ * @param number_of_streams: number of participating video streams
+ */
+ virtual void FrameSizeChange(unsigned int width,
+ unsigned int height,
+ unsigned int number_of_streams) = 0;
+
+ /**
+ * Callback Function reporting decoded I420 frame for processing.
+ * @param buffer: pointer to decoded video frame
+ * @param buffer_size: size of the decoded frame
+ * @param time_stamp: Decoder timestamp, typically 90KHz as per RTP
+ * @render_time: Wall-clock time at the decoder for synchronization
+ * purposes in milliseconds
+ * @handle: opaque handle for image object of decoded video frame.
+ * NOTE: If decoded video frame is passed through buffer , it is the
+ * responsibility of the concrete implementations of this class to own copy
+ * of the frame if needed for time longer than scope of this callback.
+ * Such implementations should be quick in processing the frames and return
+ * immediately.
+ * On the other hand, if decoded video frame is passed through handle, the
+ * implementations should keep a reference to the (ref-counted) image object
+ * inside until it's no longer needed.
+ */
+ virtual void RenderVideoFrame(const unsigned char* buffer,
+ size_t buffer_size,
+ uint32_t time_stamp,
+ int64_t render_time,
+ const ImageHandle& handle) = 0;
+ virtual void RenderVideoFrame(const unsigned char* buffer,
+ size_t buffer_size,
+ uint32_t y_stride,
+ uint32_t cbcr_stride,
+ uint32_t time_stamp,
+ int64_t render_time,
+ const ImageHandle& handle) = 0;
+
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(VideoRenderer)
+};
+
+
+/**
+ * Generic Interface for representing Audio/Video Session
+ * MediaSession conduit is identified by 2 main components
+ * 1. Attached Transport Interface for inbound and outbound RTP transport
+ * 2. Attached Renderer Interface for rendering media data off the network
+ * This class hides specifics of Media-Engine implementation from the consumers
+ * of this interface.
+ * Also provides codec configuration API for the media sent and received
+ */
+class MediaSessionConduit
+{
+protected:
+ virtual ~MediaSessionConduit() {}
+
+public:
+ enum Type { AUDIO, VIDEO } ;
+
+ virtual Type type() const = 0;
+
+ /**
+ * Function triggered on Incoming RTP packet from the remote
+ * endpoint by the transport implementation.
+ * @param data : RTP Packet (audio/video) to be processed
+ * @param len : Length of the media packet
+ * Obtained packets are passed to the Media-Engine for further
+ * processing , say, decoding
+ */
+ virtual MediaConduitErrorCode ReceivedRTPPacket(const void *data, int len) = 0;
+
+ /**
+ * Function triggered on Incoming RTCP packet from the remote
+ * endpoint by the transport implementation.
+ * @param data : RTCP Packet (audio/video) to be processed
+ * @param len : Length of the media packet
+ * Obtained packets are passed to the Media-Engine for further
+ * processing , say, decoding
+ */
+ virtual MediaConduitErrorCode ReceivedRTCPPacket(const void *data, int len) = 0;
+
+ virtual MediaConduitErrorCode StopTransmitting() = 0;
+ virtual MediaConduitErrorCode StartTransmitting() = 0;
+ virtual MediaConduitErrorCode StopReceiving() = 0;
+ virtual MediaConduitErrorCode StartReceiving() = 0;
+
+
+ /**
+ * Function to attach transmitter transport end-point of the Media conduit.
+ * @param aTransport: Reference to the concrete teansport implementation
+ * When nullptr, unsets the transmitter transport endpoint.
+   * Note: Multiple invocations of this call replace the existing transport
+   * with the new one.
+ * Note: This transport is used for RTP, and RTCP if no receiver transport is
+ * set. In the future, we should ensure that RTCP sender reports use this
+ * regardless of whether the receiver transport is set.
+ */
+ virtual MediaConduitErrorCode SetTransmitterTransport(RefPtr<TransportInterface> aTransport) = 0;
+
+ /**
+ * Function to attach receiver transport end-point of the Media conduit.
+ * @param aTransport: Reference to the concrete teansport implementation
+ * When nullptr, unsets the receiver transport endpoint.
+   * Note: Multiple invocations of this call replace the existing transport
+   * with the new one.
+ * Note: This transport is used for RTCP.
+ * Note: In the future, we should avoid using this for RTCP sender reports.
+ */
+ virtual MediaConduitErrorCode SetReceiverTransport(RefPtr<TransportInterface> aTransport) = 0;
+
+ virtual bool SetLocalSSRC(unsigned int ssrc) = 0;
+ virtual bool GetLocalSSRC(unsigned int* ssrc) = 0;
+ virtual bool GetRemoteSSRC(unsigned int* ssrc) = 0;
+ virtual bool SetLocalCNAME(const char* cname) = 0;
+
+ /**
+ * Functions returning stats needed by w3c stats model.
+ */
+ virtual bool GetVideoEncoderStats(double* framerateMean,
+ double* framerateStdDev,
+ double* bitrateMean,
+ double* bitrateStdDev,
+ uint32_t* droppedFrames) = 0;
+ virtual bool GetVideoDecoderStats(double* framerateMean,
+ double* framerateStdDev,
+ double* bitrateMean,
+ double* bitrateStdDev,
+ uint32_t* discardedPackets) = 0;
+ virtual bool GetAVStats(int32_t* jitterBufferDelayMs,
+ int32_t* playoutBufferDelayMs,
+ int32_t* avSyncOffsetMs) = 0;
+ virtual bool GetRTPStats(unsigned int* jitterMs,
+ unsigned int* cumulativeLost) = 0;
+ virtual bool GetRTCPReceiverReport(DOMHighResTimeStamp* timestamp,
+ uint32_t* jitterMs,
+ uint32_t* packetsReceived,
+ uint64_t* bytesReceived,
+ uint32_t* cumulativeLost,
+ int32_t* rttMs) = 0;
+ virtual bool GetRTCPSenderReport(DOMHighResTimeStamp* timestamp,
+ unsigned int* packetsSent,
+ uint64_t* bytesSent) = 0;
+
+ virtual uint64_t CodecPluginID() = 0;
+
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaSessionConduit)
+
+};
+
+// Abstract base classes for external encoder/decoder.
+class CodecPluginID
+{
+public:
+ virtual ~CodecPluginID() {}
+
+ virtual uint64_t PluginID() const = 0;
+};
+
+class VideoEncoder : public CodecPluginID
+{
+public:
+ virtual ~VideoEncoder() {}
+};
+
+class VideoDecoder : public CodecPluginID
+{
+public:
+ virtual ~VideoDecoder() {}
+};
+
+/**
+ * MediaSessionConduit for video
+ * Refer to the comments on MediaSessionConduit above for overall
+ * information
+ */
+class VideoSessionConduit : public MediaSessionConduit
+{
+public:
+ /**
+ * Factory function to create and initialize a Video Conduit Session
+ * return: Concrete VideoSessionConduitObject or nullptr in the case
+ * of failure
+ */
+ static RefPtr<VideoSessionConduit> Create();
+
+ enum FrameRequestType
+ {
+ FrameRequestNone,
+ FrameRequestFir,
+ FrameRequestPli,
+ FrameRequestUnknown
+ };
+
+ VideoSessionConduit() : mFrameRequestMethod(FrameRequestNone),
+ mUsingNackBasic(false),
+ mUsingTmmbr(false),
+ mUsingFEC(false) {}
+
+ virtual ~VideoSessionConduit() {}
+
+ virtual Type type() const { return VIDEO; }
+
+ /**
+ * Function to attach Renderer end-point of the Media-Video conduit.
+ * @param aRenderer : Reference to the concrete Video renderer implementation
+   * Note: Multiple invocations of this API shall remove an existing renderer
+   * and attach the new one to the Conduit.
+ */
+ virtual MediaConduitErrorCode AttachRenderer(RefPtr<VideoRenderer> aRenderer) = 0;
+ virtual void DetachRenderer() = 0;
+
+ /**
+ * Function to deliver a capture video frame for encoding and transport
+ * @param video_frame: pointer to captured video-frame.
+ * @param video_frame_length: size of the frame
+ * @param width, height: dimensions of the frame
+ * @param video_type: Type of the video frame - I420, RAW
+ * @param captured_time: timestamp when the frame was captured.
+   *        if 0 timestamp is automatically generated
+ * NOTE: ConfigureSendMediaCodec() MUST be called before this function can be invoked
+ * This ensures the inserted video-frames can be transmitted by the conduit
+ */
+ virtual MediaConduitErrorCode SendVideoFrame(unsigned char* video_frame,
+ unsigned int video_frame_length,
+ unsigned short width,
+ unsigned short height,
+ VideoType video_type,
+ uint64_t capture_time) = 0;
+ virtual MediaConduitErrorCode SendVideoFrame(webrtc::I420VideoFrame& frame) = 0;
+
+ virtual MediaConduitErrorCode ConfigureCodecMode(webrtc::VideoCodecMode) = 0;
+ /**
+ * Function to configure send codec for the video session
+ * @param sendSessionConfig: CodecConfiguration
+ * @result: On Success, the video engine is configured with passed in codec for send
+ * On failure, video engine transmit functionality is disabled.
+   * NOTE: This API can be invoked multiple times. Invoking this API may involve restarting
+ * transmission sub-system on the engine
+ *
+ */
+ virtual MediaConduitErrorCode ConfigureSendMediaCodec(const VideoCodecConfig* sendSessionConfig) = 0;
+
+ /**
+   * Function to configure the list of receive codecs for the video session
+   * @param sendSessionConfig: CodecConfiguration
+   * NOTE: This API can be invoked multiple times. Invoking this API may involve restarting
+ * reception sub-system on the engine
+ *
+ */
+ virtual MediaConduitErrorCode ConfigureRecvMediaCodecs(
+ const std::vector<VideoCodecConfig* >& recvCodecConfigList) = 0;
+
+ /**
+ * Set an external encoder
+ * @param encoder
+ * @result: on success, we will use the specified encoder
+ */
+ virtual MediaConduitErrorCode SetExternalSendCodec(VideoCodecConfig* config,
+ VideoEncoder* encoder) = 0;
+
+ /**
+ * Set an external decoder
+ * @param decoder
+ * @result: on success, we will use the specified decoder
+ */
+ virtual MediaConduitErrorCode SetExternalRecvCodec(VideoCodecConfig* config,
+ VideoDecoder* decoder) = 0;
+
+ /**
+ * Function to enable the RTP Stream ID (RID) extension
+ * @param enabled: enable extension
+ * @param id: id to be used for this rtp header extension
+ * NOTE: See VideoConduit for more information
+ */
+ virtual MediaConduitErrorCode EnableRTPStreamIdExtension(bool enabled, uint8_t id) = 0;
+
+ /**
+ * These methods allow unit tests to double-check that the
+ * max-fs and max-fr related settings are as expected.
+ */
+ virtual unsigned short SendingWidth() = 0;
+
+ virtual unsigned short SendingHeight() = 0;
+
+ virtual unsigned int SendingMaxFs() = 0;
+
+ virtual unsigned int SendingMaxFr() = 0;
+
+ /**
+ * These methods allow unit tests to double-check that the
+ * rtcp-fb settings are as expected.
+ */
+ FrameRequestType FrameRequestMethod() const {
+ return mFrameRequestMethod;
+ }
+
+ bool UsingNackBasic() const {
+ return mUsingNackBasic;
+ }
+
+ bool UsingTmmbr() const {
+ return mUsingTmmbr;
+ }
+
+ bool UsingFEC() const {
+ return mUsingFEC;
+ }
+
+ protected:
+ /* RTCP feedback settings, for unit testing purposes */
+ FrameRequestType mFrameRequestMethod;
+ bool mUsingNackBasic;
+ bool mUsingTmmbr;
+ bool mUsingFEC;
+};
+
+/**
+ * MediaSessionConduit for audio
+ * Refer to the comments on MediaSessionConduit above for overall
+ * information
+ */
+class AudioSessionConduit : public MediaSessionConduit
+{
+public:
+
+ /**
+ * Factory function to create and initialize an Audio Conduit Session
+ * return: Concrete AudioSessionConduitObject or nullptr in the case
+ * of failure
+ */
+ static RefPtr<AudioSessionConduit> Create();
+
+ virtual ~AudioSessionConduit() {}
+
+ virtual Type type() const { return AUDIO; }
+
+
+ /**
+ * Function to deliver externally captured audio sample for encoding and transport
+ * @param audioData [in]: Pointer to array containing a frame of audio
+ * @param lengthSamples [in]: Length of audio frame in samples in multiple of 10 milliseconds
+ * Ex: Frame length is 160, 320, 440 for 16, 32, 44 kHz sampling rates
+ respectively.
+ audioData[] is lengthSamples in size
+                          say, for a 16kHz sampling rate, audioData[] should contain 160
+                          samples of 16-bits each for a 10ms audio frame.
+ * @param samplingFreqHz [in]: Frequency/rate of the sampling in Hz ( 16000, 32000 ...)
+ * @param capture_delay [in]: Approx Delay from recording until it is delivered to VoiceEngine
+ in milliseconds.
+ * NOTE: ConfigureSendMediaCodec() SHOULD be called before this function can be invoked
+ * This ensures the inserted audio-samples can be transmitted by the conduit
+ *
+ */
+ virtual MediaConduitErrorCode SendAudioFrame(const int16_t audioData[],
+ int32_t lengthSamples,
+ int32_t samplingFreqHz,
+ int32_t capture_delay) = 0;
+
+ /**
+   * Function to grab a decoded audio-sample from the media engine for rendering
+   * / playout of length 10 milliseconds.
+ *
+ * @param speechData [in]: Pointer to a array to which a 10ms frame of audio will be copied
+ * @param samplingFreqHz [in]: Frequency of the sampling for playback in Hertz (16000, 32000,..)
+ * @param capture_delay [in]: Estimated Time between reading of the samples to rendering/playback
+ * @param lengthSamples [out]: Will contain length of the audio frame in samples at return.
+ Ex: A value of 160 implies 160 samples each of 16-bits was copied
+ into speechData
+   * NOTE: This function should be invoked every 10 milliseconds for the best
+   *          performance
+ * NOTE: ConfigureRecvMediaCodec() SHOULD be called before this function can be invoked
+ * This ensures the decoded samples are ready for reading.
+ *
+ */
+ virtual MediaConduitErrorCode GetAudioFrame(int16_t speechData[],
+ int32_t samplingFreqHz,
+ int32_t capture_delay,
+ int& lengthSamples) = 0;
+
+ /**
+ * Function to configure send codec for the audio session
+ * @param sendSessionConfig: CodecConfiguration
+ * NOTE: See VideoConduit for more information
+ */
+
+ virtual MediaConduitErrorCode ConfigureSendMediaCodec(const AudioCodecConfig* sendCodecConfig) = 0;
+
+ /**
+ * Function to configure list of receive codecs for the audio session
+ * @param sendSessionConfig: CodecConfiguration
+ * NOTE: See VideoConduit for more information
+ */
+ virtual MediaConduitErrorCode ConfigureRecvMediaCodecs(
+ const std::vector<AudioCodecConfig* >& recvCodecConfigList) = 0;
+ /**
+ * Function to enable the audio level extension
+ * @param enabled: enable extension
+ * @param id: id to be used for this rtp header extension
+ * NOTE: See AudioConduit for more information
+ */
+ virtual MediaConduitErrorCode EnableAudioLevelExtension(bool enabled, uint8_t id) = 0;
+
+ virtual bool SetDtmfPayloadType(unsigned char type) = 0;
+
+ virtual bool InsertDTMFTone(int channel, int eventCode, bool outOfBand,
+ int lengthMs, int attenuationDb) = 0;
+
+};
+}
+#endif
diff --git a/media/webrtc/signaling/src/media-conduit/OMXVideoCodec.cpp b/media/webrtc/signaling/src/media-conduit/OMXVideoCodec.cpp
new file mode 100644
index 000000000..d46398402
--- /dev/null
+++ b/media/webrtc/signaling/src/media-conduit/OMXVideoCodec.cpp
@@ -0,0 +1,30 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "OMXVideoCodec.h"
+
+#ifdef WEBRTC_GONK
+#include "WebrtcOMXH264VideoCodec.h"
+#endif
+
+namespace mozilla {
+
+VideoEncoder*
+OMXVideoCodec::CreateEncoder(CodecType aCodecType)
+{
+ if (aCodecType == CODEC_H264) {
+ return new WebrtcOMXH264VideoEncoder();
+ }
+ return nullptr;
+}
+
+VideoDecoder*
+OMXVideoCodec::CreateDecoder(CodecType aCodecType) {
+ if (aCodecType == CODEC_H264) {
+ return new WebrtcOMXH264VideoDecoder();
+ }
+ return nullptr;
+}
+
+}
diff --git a/media/webrtc/signaling/src/media-conduit/OMXVideoCodec.h b/media/webrtc/signaling/src/media-conduit/OMXVideoCodec.h
new file mode 100644
index 000000000..51df50263
--- /dev/null
+++ b/media/webrtc/signaling/src/media-conduit/OMXVideoCodec.h
@@ -0,0 +1,32 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef OMX_VIDEO_CODEC_H_
+#define OMX_VIDEO_CODEC_H_
+
+#include "MediaConduitInterface.h"
+
+namespace mozilla {
+class OMXVideoCodec {
+ public:
+ enum CodecType {
+ CODEC_H264,
+ };
+
+ /**
+ * Create encoder object for codec type |aCodecType|. Return |nullptr| when
+ * failed.
+ */
+ static VideoEncoder* CreateEncoder(CodecType aCodecType);
+
+ /**
+ * Create decoder object for codec type |aCodecType|. Return |nullptr| when
+ * failed.
+ */
+ static VideoDecoder* CreateDecoder(CodecType aCodecType);
+};
+
+}
+
+#endif // OMX_VIDEO_CODEC_H_
diff --git a/media/webrtc/signaling/src/media-conduit/RunningStat.h b/media/webrtc/signaling/src/media-conduit/RunningStat.h
new file mode 100644
index 000000000..1d0cdbeca
--- /dev/null
+++ b/media/webrtc/signaling/src/media-conduit/RunningStat.h
@@ -0,0 +1,66 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+/* Adapted from "Accurately computing running variance - John D. Cook"
+ http://www.johndcook.com/standard_deviation.html */
+
+#ifndef RUNNING_STAT_H_
+#define RUNNING_STAT_H_
+#include <math.h>
+
+namespace mozilla {
+
+class RunningStat
+{
+public:
+ RunningStat() : mN(0) {}
+
+ void Clear()
+ {
+ mN = 0;
+ }
+
+ void Push(double x)
+ {
+ mN++;
+
+ // See Knuth TAOCP vol 2, 3rd edition, page 232
+ if (mN == 1)
+ {
+ mOldM = mNewM = x;
+ mOldS = 0.0;
+ } else {
+ mNewM = mOldM + (x - mOldM) / mN;
+ mNewS = mOldS + (x - mOldM) * (x - mNewM);
+
+ // set up for next iteration
+ mOldM = mNewM;
+ mOldS = mNewS;
+ }
+ }
+
+ int NumDataValues() const
+ {
+ return mN;
+ }
+
+ double Mean() const
+ {
+ return (mN > 0) ? mNewM : 0.0;
+ }
+
+ double Variance() const
+ {
+ return (mN > 1) ? mNewS / (mN - 1) : 0.0;
+ }
+
+ double StandardDeviation() const
+ {
+ return sqrt(Variance());
+ }
+
+private:
+ int mN;
+ double mOldM, mNewM, mOldS, mNewS;
+};
+}
+#endif //RUNNING_STAT_H_
diff --git a/media/webrtc/signaling/src/media-conduit/VideoConduit.cpp b/media/webrtc/signaling/src/media-conduit/VideoConduit.cpp
new file mode 100755
index 000000000..3f0445122
--- /dev/null
+++ b/media/webrtc/signaling/src/media-conduit/VideoConduit.cpp
@@ -0,0 +1,2129 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "CSFLog.h"
+#include "nspr.h"
+#include "plstr.h"
+
+#include "VideoConduit.h"
+#include "AudioConduit.h"
+#include "nsThreadUtils.h"
+#include "LoadManager.h"
+#include "YuvStamper.h"
+#include "nsServiceManagerUtils.h"
+#include "nsIPrefService.h"
+#include "nsIPrefBranch.h"
+#include "mozilla/media/MediaUtils.h"
+#include "mozilla/TemplateLib.h"
+
+#include "webrtc/common_types.h"
+#include "webrtc/common_video/interface/native_handle.h"
+#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
+#include "webrtc/video_engine/include/vie_errors.h"
+#include "webrtc/video_engine/vie_defines.h"
+
+#include "mozilla/Unused.h"
+
+#ifdef MOZ_WIDGET_ANDROID
+#include "AndroidJNIWrapper.h"
+#endif
+
+// for ntohs
+#ifdef _MSC_VER
+#include "Winsock2.h"
+#else
+#include <netinet/in.h>
+#endif
+
+#include <algorithm>
+#include <math.h>
+
+#define DEFAULT_VIDEO_MAX_FRAMERATE 30
+#define INVALID_RTP_PAYLOAD 255 //valid payload types are 0 to 127
+
+namespace mozilla {
+
// Module tag used by all CSFLog* calls in this file.
static const char* logTag ="WebrtcVideoConduit";

// 32 bytes is what WebRTC CodecInst expects
const unsigned int WebrtcVideoConduit::CODEC_PLNAME_SIZE = 32;
+
+/**
+ * Factory Method for VideoConduit
+ */
+RefPtr<VideoSessionConduit>
+VideoSessionConduit::Create()
+{
+ NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
+ CSFLogDebug(logTag, "%s ", __FUNCTION__);
+
+ WebrtcVideoConduit* obj = new WebrtcVideoConduit();
+ if(obj->Init() != kMediaConduitNoError)
+ {
+ CSFLogError(logTag, "%s VideoConduit Init Failed ", __FUNCTION__);
+ delete obj;
+ return nullptr;
+ }
+ CSFLogDebug(logTag, "%s Successfully created VideoConduit ", __FUNCTION__);
+ return obj;
+}
+
/**
 * Constructor only zeroes/defaults members; all engine wiring happens in
 * Init(), which the factory calls before handing the object out.
 */
WebrtcVideoConduit::WebrtcVideoConduit():
  mVideoEngine(nullptr),
  mTransportMonitor("WebrtcVideoConduit"),
  mTransmitterTransport(nullptr),
  mReceiverTransport(nullptr),
  mRenderer(nullptr),
  mPtrExtCapture(nullptr),
  mEngineTransmitting(false),
  mEngineReceiving(false),
  mChannel(-1),
  mCapId(-1),
  mCodecMutex("VideoConduit codec db"),
  mInReconfig(false),
  mLastWidth(0), // forces a check for reconfig at start
  mLastHeight(0),
  mSendingWidth(0),
  mSendingHeight(0),
  mReceivingWidth(0),
  mReceivingHeight(0),
  mSendingFramerate(DEFAULT_VIDEO_MAX_FRAMERATE),
  // Stored as tenths-of-fps: Atomic<> can't hold double (see GetVideoEncoderStats)
  mLastFramerateTenths(DEFAULT_VIDEO_MAX_FRAMERATE*10),
  mNumReceivingStreams(1),
  mVideoLatencyTestEnable(false),
  mVideoLatencyAvg(0),
  mMinBitrate(0),
  mStartBitrate(0),
  mMaxBitrate(0),
  mMinBitrateEstimate(0),
  mRtpStreamIdEnabled(false),
  mRtpStreamIdExtId(0),
  mCodecMode(webrtc::kRealtimeVideo)
{}
+
/**
 * Destructor: detaches the synced AudioConduit (dropping our strong ref on
 * MainThread, where it expects to be released) and then tears down the
 * engine via Destroy().
 */
WebrtcVideoConduit::~WebrtcVideoConduit()
{
  NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
  CSFLogDebug(logTag, "%s ", __FUNCTION__);

  // Release AudioConduit first by dropping reference on MainThread, where it expects to be
  SyncTo(nullptr);
  Destroy();
}
+
+bool WebrtcVideoConduit::SetLocalSSRC(unsigned int ssrc)
+{
+ unsigned int oldSsrc;
+ if (!GetLocalSSRC(&oldSsrc)) {
+ MOZ_ASSERT(false, "GetLocalSSRC failed");
+ return false;
+ }
+
+ if (oldSsrc == ssrc) {
+ return true;
+ }
+
+ bool wasTransmitting = mEngineTransmitting;
+ if (StopTransmitting() != kMediaConduitNoError) {
+ return false;
+ }
+
+ if (mPtrRTP->SetLocalSSRC(mChannel, ssrc)) {
+ return false;
+ }
+
+ if (wasTransmitting) {
+ if (StartTransmitting() != kMediaConduitNoError) {
+ return false;
+ }
+ }
+ return true;
+}
+
// Reads the local (send) SSRC into |*ssrc|. Returns true on success;
// webrtc returns 0 for success, hence the negation.
bool WebrtcVideoConduit::GetLocalSSRC(unsigned int* ssrc)
{
  return !mPtrRTP->GetLocalSSRC(mChannel, *ssrc);
}
+
// Reads the remote (receive) SSRC into |*ssrc|. Returns true on success;
// webrtc returns 0 for success, hence the negation.
bool WebrtcVideoConduit::GetRemoteSSRC(unsigned int* ssrc)
{
  return !mPtrRTP->GetRemoteSSRC(mChannel, *ssrc);
}
+
/**
 * Sets the RTCP CNAME for this channel. The input is copied into a local
 * 256-byte buffer with guaranteed NUL termination, so longer names are
 * silently truncated. Returns true on success.
 */
bool WebrtcVideoConduit::SetLocalCNAME(const char* cname)
{
  char temp[256];
  strncpy(temp, cname, sizeof(temp) - 1);
  temp[sizeof(temp) - 1] = 0;
  return !mPtrRTP->SetRTCPCName(mChannel, temp);
}
+
/**
 * Reports encoder framerate/bitrate statistics through the out-params.
 * Returns false when we are not transmitting. As a side effect, when the
 * measured framerate drifts >10% from the cached value, the cached rate is
 * updated (under mCodecMutex) and the send resolution re-selected so
 * bandwidth limits track the camera's actual fps.
 */
bool WebrtcVideoConduit::GetVideoEncoderStats(double* framerateMean,
                                              double* framerateStdDev,
                                              double* bitrateMean,
                                              double* bitrateStdDev,
                                              uint32_t* droppedFrames)
{
  if (!mEngineTransmitting) {
    return false;
  }
  MOZ_ASSERT(mVideoCodecStat);
  mVideoCodecStat->GetEncoderStats(framerateMean, framerateStdDev,
                                   bitrateMean, bitrateStdDev,
                                   droppedFrames);

  // See if we need to adjust bandwidth.
  // Avoid changing bandwidth constantly; use hysteresis.

  // Note: mLastFramerate is a relaxed Atomic because we're setting it here, and
  // reading it on whatever thread calls DeliverFrame/SendVideoFrame. Alternately
  // we could use a lock. Note that we don't change it often, and read it once per frame.
  // We scale by *10 because mozilla::Atomic<> doesn't do 'double' or 'float'.
  double framerate = mLastFramerateTenths/10.0; // fetch once
  if (std::abs(*framerateMean - framerate)/framerate > 0.1 &&
      *framerateMean >= 0.5) {
    // unchanged resolution, but adjust bandwidth limits to match camera fps
    CSFLogDebug(logTag, "Encoder frame rate changed from %f to %f",
                (mLastFramerateTenths/10.0), *framerateMean);
    MutexAutoLock lock(mCodecMutex);
    mLastFramerateTenths = *framerateMean * 10;
    SelectSendResolution(mSendingWidth, mSendingHeight, nullptr);
  }
  return true;
}
+
/**
 * Reports decoder framerate/bitrate statistics through the out-params.
 * Returns false when we are not receiving (stats would be meaningless).
 */
bool WebrtcVideoConduit::GetVideoDecoderStats(double* framerateMean,
                                              double* framerateStdDev,
                                              double* bitrateMean,
                                              double* bitrateStdDev,
                                              uint32_t* discardedPackets)
{
  if (!mEngineReceiving) {
    return false;
  }
  MOZ_ASSERT(mVideoCodecStat);
  mVideoCodecStat->GetDecoderStats(framerateMean, framerateStdDev,
                                   bitrateMean, bitrateStdDev,
                                   discardedPackets);
  return true;
}
+
// A/V sync statistics are not implemented for the video conduit; always
// returns false and leaves the out-params untouched.
bool WebrtcVideoConduit::GetAVStats(int32_t* jitterBufferDelayMs,
                                    int32_t* playoutBufferDelayMs,
                                    int32_t* avSyncOffsetMs) {
  return false;
}
+
/**
 * Fetches receive-side jitter (ms) and cumulative packet loss for this
 * channel. The other values webrtc reports (fraction lost, extended max
 * sequence, RTT) are read into locals and discarded. Returns true on success.
 */
bool WebrtcVideoConduit::GetRTPStats(unsigned int* jitterMs,
                                     unsigned int* cumulativeLost) {
  unsigned short fractionLost;
  unsigned extendedMax;
  int64_t rttMs;
  // GetReceivedRTCPStatistics is a poorly named GetRTPStatistics variant
  return !mPtrRTP->GetReceivedRTCPStatistics(mChannel, fractionLost,
                                             *cumulativeLost,
                                             extendedMax,
                                             *jitterMs,
                                             rttMs);
}
+
/**
 * Fills in stats from the most recent RTCP receiver report sent by the
 * remote end. On success the report's NTP time is converted to a
 * DOMHighResTimeStamp in |*timestamp|. Returns true on success; on failure
 * the out-params must not be trusted.
 */
bool WebrtcVideoConduit::GetRTCPReceiverReport(DOMHighResTimeStamp* timestamp,
                                               uint32_t* jitterMs,
                                               uint32_t* packetsReceived,
                                               uint64_t* bytesReceived,
                                               uint32_t* cumulativeLost,
                                               int32_t* rttMs) {
  uint32_t ntpHigh, ntpLow;
  uint16_t fractionLost; // read but not reported to the caller
  bool result = !mPtrRTP->GetRemoteRTCPReceiverInfo(mChannel, ntpHigh, ntpLow,
                                                    *packetsReceived,
                                                    *bytesReceived,
                                                    jitterMs,
                                                    &fractionLost,
                                                    cumulativeLost,
                                                    rttMs);
  if (result) {
    *timestamp = NTPtoDOMHighResTimeStamp(ntpHigh, ntpLow);
  }
  return result;
}
+
/**
 * Fills in stats from the most recent RTCP sender report from the remote
 * end (NTP time converted to DOMHighResTimeStamp, packet and octet counts).
 * Returns true on success.
 */
bool WebrtcVideoConduit::GetRTCPSenderReport(DOMHighResTimeStamp* timestamp,
                                             unsigned int* packetsSent,
                                             uint64_t* bytesSent) {
  struct webrtc::SenderInfo senderInfo;
  bool result = !mPtrRTP->GetRemoteRTCPSenderInfo(mChannel, &senderInfo);
  if (result) {
    *timestamp = NTPtoDOMHighResTimeStamp(senderInfo.NTP_timestamp_high,
                                          senderInfo.NTP_timestamp_low);
    *packetsSent = senderInfo.sender_packet_count;
    *bytesSent = senderInfo.sender_octet_count;
  }
  return result;
}
+
+MediaConduitErrorCode
+WebrtcVideoConduit::InitMain()
+{
+#if defined(MOZILLA_INTERNAL_API)
+ // already know we must be on MainThread barring unit test weirdness
+ MOZ_ASSERT(NS_IsMainThread());
+
+ nsresult rv;
+ nsCOMPtr<nsIPrefService> prefs = do_GetService("@mozilla.org/preferences-service;1", &rv);
+ if (!NS_WARN_IF(NS_FAILED(rv)))
+ {
+ nsCOMPtr<nsIPrefBranch> branch = do_QueryInterface(prefs);
+
+ if (branch)
+ {
+ int32_t temp;
+ Unused << NS_WARN_IF(NS_FAILED(branch->GetBoolPref("media.video.test_latency", &mVideoLatencyTestEnable)));
+ if (!NS_WARN_IF(NS_FAILED(branch->GetIntPref("media.peerconnection.video.min_bitrate", &temp))))
+ {
+ if (temp >= 0) {
+ mMinBitrate = temp;
+ }
+ }
+ if (!NS_WARN_IF(NS_FAILED(branch->GetIntPref("media.peerconnection.video.start_bitrate", &temp))))
+ {
+ if (temp >= 0) {
+ mStartBitrate = temp;
+ }
+ }
+ if (!NS_WARN_IF(NS_FAILED(branch->GetIntPref("media.peerconnection.video.max_bitrate", &temp))))
+ {
+ if (temp >= 0) {
+ mMaxBitrate = temp;
+ }
+ }
+ if (mMinBitrate != 0 && mMinBitrate < webrtc::kViEMinCodecBitrate) {
+ mMinBitrate = webrtc::kViEMinCodecBitrate;
+ }
+ if (mStartBitrate < mMinBitrate) {
+ mStartBitrate = mMinBitrate;
+ }
+ if (mStartBitrate > mMaxBitrate) {
+ mStartBitrate = mMaxBitrate;
+ }
+ if (!NS_WARN_IF(NS_FAILED(branch->GetIntPref("media.peerconnection.video.min_bitrate_estimate", &temp))))
+ {
+ if (temp >= 0) {
+ mMinBitrateEstimate = temp;
+ }
+ }
+ bool use_loadmanager = false;
+ if (!NS_WARN_IF(NS_FAILED(branch->GetBoolPref("media.navigator.load_adapt", &use_loadmanager))))
+ {
+ if (use_loadmanager) {
+ mLoadManager = LoadManagerBuild();
+ }
+ }
+ }
+ }
+
+#ifdef MOZ_WIDGET_ANDROID
+ // get the JVM
+ JavaVM *jvm = jsjni_GetVM();
+
+ if (webrtc::VideoEngine::SetAndroidObjects(jvm) != 0) {
+ CSFLogError(logTag, "%s: could not set Android objects", __FUNCTION__);
+ return kMediaConduitSessionNotInited;
+ }
+#endif
+#endif
+ return kMediaConduitNoError;
+}
+
+/**
+ * Performs initialization of the MANDATORY components of the Video Engine
+ */
+MediaConduitErrorCode
+WebrtcVideoConduit::Init()
+{
+ CSFLogDebug(logTag, "%s this=%p", __FUNCTION__, this);
+ MediaConduitErrorCode result;
+ // Run code that must run on MainThread first
+ MOZ_ASSERT(NS_IsMainThread());
+ result = InitMain();
+ if (result != kMediaConduitNoError) {
+ return result;
+ }
+
+ // Per WebRTC APIs below function calls return nullptr on failure
+ mVideoEngine = webrtc::VideoEngine::Create();
+ if(!mVideoEngine)
+ {
+ CSFLogError(logTag, "%s Unable to create video engine ", __FUNCTION__);
+ return kMediaConduitSessionNotInited;
+ }
+
+ if( !(mPtrViEBase = ViEBase::GetInterface(mVideoEngine)))
+ {
+ CSFLogError(logTag, "%s Unable to get video base interface ", __FUNCTION__);
+ return kMediaConduitSessionNotInited;
+ }
+
+ if( !(mPtrViECapture = ViECapture::GetInterface(mVideoEngine)))
+ {
+ CSFLogError(logTag, "%s Unable to get video capture interface", __FUNCTION__);
+ return kMediaConduitSessionNotInited;
+ }
+
+ if( !(mPtrViECodec = ViECodec::GetInterface(mVideoEngine)))
+ {
+ CSFLogError(logTag, "%s Unable to get video codec interface ", __FUNCTION__);
+ return kMediaConduitSessionNotInited;
+ }
+
+ if( !(mPtrViENetwork = ViENetwork::GetInterface(mVideoEngine)))
+ {
+ CSFLogError(logTag, "%s Unable to get video network interface ", __FUNCTION__);
+ return kMediaConduitSessionNotInited;
+ }
+
+ if( !(mPtrViERender = ViERender::GetInterface(mVideoEngine)))
+ {
+ CSFLogError(logTag, "%s Unable to get video render interface ", __FUNCTION__);
+ return kMediaConduitSessionNotInited;
+ }
+
+ mPtrExtCodec = webrtc::ViEExternalCodec::GetInterface(mVideoEngine);
+ if (!mPtrExtCodec) {
+ CSFLogError(logTag, "%s Unable to get external codec interface: %d ",
+ __FUNCTION__,mPtrViEBase->LastError());
+ return kMediaConduitSessionNotInited;
+ }
+
+ if( !(mPtrRTP = webrtc::ViERTP_RTCP::GetInterface(mVideoEngine)))
+ {
+ CSFLogError(logTag, "%s Unable to get video RTCP interface ", __FUNCTION__);
+ return kMediaConduitSessionNotInited;
+ }
+
+ if ( !(mPtrExtCodec = webrtc::ViEExternalCodec::GetInterface(mVideoEngine)))
+ {
+ CSFLogError(logTag, "%s Unable to get external codec interface %d ",
+ __FUNCTION__, mPtrViEBase->LastError());
+ return kMediaConduitSessionNotInited;
+ }
+
+ CSFLogDebug(logTag, "%s Engine Created: Init'ng the interfaces ",__FUNCTION__);
+
+ if(mPtrViEBase->Init() == -1)
+ {
+ CSFLogError(logTag, " %s Video Engine Init Failed %d ",__FUNCTION__,
+ mPtrViEBase->LastError());
+ return kMediaConduitSessionNotInited;
+ }
+
+ if(mPtrViEBase->CreateChannel(mChannel) == -1)
+ {
+ CSFLogError(logTag, " %s Channel creation Failed %d ",__FUNCTION__,
+ mPtrViEBase->LastError());
+ return kMediaConduitChannelError;
+ }
+
+ if(mPtrViENetwork->RegisterSendTransport(mChannel, *this) == -1)
+ {
+ CSFLogError(logTag, "%s ViENetwork Failed %d ", __FUNCTION__,
+ mPtrViEBase->LastError());
+ return kMediaConduitTransportRegistrationFail;
+ }
+
+ if(mPtrViECapture->AllocateExternalCaptureDevice(mCapId,
+ mPtrExtCapture) == -1)
+ {
+ CSFLogError(logTag, "%s Unable to Allocate capture module: %d ",
+ __FUNCTION__, mPtrViEBase->LastError());
+ return kMediaConduitCaptureError;
+ }
+
+ if(mPtrViECapture->ConnectCaptureDevice(mCapId,mChannel) == -1)
+ {
+ CSFLogError(logTag, "%s Unable to Connect capture module: %d ",
+ __FUNCTION__,mPtrViEBase->LastError());
+ return kMediaConduitCaptureError;
+ }
+ // Set up some parameters, per juberti. Set MTU.
+ if(mPtrViENetwork->SetMTU(mChannel, 1200) != 0)
+ {
+ CSFLogError(logTag, "%s MTU Failed %d ", __FUNCTION__,
+ mPtrViEBase->LastError());
+ return kMediaConduitMTUError;
+ }
+ // Turn on RTCP and loss feedback reporting.
+ if(mPtrRTP->SetRTCPStatus(mChannel, webrtc::kRtcpCompound_RFC4585) != 0)
+ {
+ CSFLogError(logTag, "%s RTCPStatus Failed %d ", __FUNCTION__,
+ mPtrViEBase->LastError());
+ return kMediaConduitRTCPStatusError;
+ }
+
+ if (mPtrViERender->AddRenderer(mChannel,
+ webrtc::kVideoI420,
+ (webrtc::ExternalRenderer*) this) == -1) {
+ CSFLogError(logTag, "%s Failed to added external renderer ", __FUNCTION__);
+ return kMediaConduitInvalidRenderer;
+ }
+
+ if (mLoadManager) {
+ mPtrViEBase->RegisterCpuOveruseObserver(mChannel, mLoadManager);
+ mPtrViEBase->SetLoadManager(mLoadManager);
+ }
+
+ CSFLogError(logTag, "%s Initialization Done", __FUNCTION__);
+ return kMediaConduitNoError;
+}
+
/**
 * Tears down the conduit: disconnects/releases capture, releases the
 * external codec interface, stops rendering, deregisters the transport,
 * stops send/receive and deletes the channel, ends codec statistics, then
 * releases every engine sub-interface before finally deleting the
 * VideoEngine. Order matters; see inline comments.
 */
void
WebrtcVideoConduit::Destroy()
{
  // The first one of a pair to be deleted shuts down media for both
  //Deal with External Capturer
  if(mPtrViECapture)
  {
    mPtrViECapture->DisconnectCaptureDevice(mCapId);
    mPtrViECapture->ReleaseCaptureDevice(mCapId);
    mPtrExtCapture = nullptr;
  }

  if (mPtrExtCodec) {
    mPtrExtCodec->Release();
    mPtrExtCodec = NULL;
  }

  //Deal with External Renderer
  if(mPtrViERender)
  {
    if(mRenderer) {
      mPtrViERender->StopRender(mChannel);
    }
    mPtrViERender->RemoveRenderer(mChannel);
  }

  //Deal with the transport
  if(mPtrViENetwork)
  {
    mPtrViENetwork->DeregisterSendTransport(mChannel);
  }

  if(mPtrViEBase)
  {
    mPtrViEBase->StopSend(mChannel);
    mPtrViEBase->StopReceive(mChannel);
    mPtrViEBase->DeleteChannel(mChannel);
  }

  // mVideoCodecStat has a back-ptr to mPtrViECodec that must be released first
  if (mVideoCodecStat) {
    mVideoCodecStat->EndOfCallStats();
  }
  mVideoCodecStat = nullptr;
  // We can't delete the VideoEngine until all these are released!
  // And we can't use a Scoped ptr, since the order is arbitrary
  mPtrViEBase = nullptr;
  mPtrViECapture = nullptr;
  mPtrViECodec = nullptr;
  mPtrViENetwork = nullptr;
  mPtrViERender = nullptr;
  mPtrRTP = nullptr;
  mPtrExtCodec = nullptr;

  // only one opener can call Delete. Have it be the last to close.
  if(mVideoEngine)
  {
    webrtc::VideoEngine::Delete(mVideoEngine);
  }
}
+
/**
 * Links (or unlinks) this video conduit with an audio conduit for lip sync.
 * @param aConduit the audio conduit to sync to, or nullptr to cancel an
 *                 existing sync and drop our strong reference to it.
 */
void
WebrtcVideoConduit::SyncTo(WebrtcAudioConduit *aConduit)
{
  CSFLogDebug(logTag, "%s Synced to %p", __FUNCTION__, aConduit);

  // SyncTo(value) syncs to the AudioConduit, and if already synced replaces
  // the current sync target. SyncTo(nullptr) cancels any existing sync and
  // releases the strong ref to AudioConduit.
  if (aConduit) {
    mPtrViEBase->SetVoiceEngine(aConduit->GetVoiceEngine());
    mPtrViEBase->ConnectAudioChannel(mChannel, aConduit->GetChannel());
    // NOTE: this means the VideoConduit will keep the AudioConduit alive!
  } else {
    mPtrViEBase->DisconnectAudioChannel(mChannel);
    mPtrViEBase->SetVoiceEngine(nullptr);
  }

  mSyncedTo = aConduit;
}
+
/**
 * Attaches the sink that will receive decoded frames. Replaces any existing
 * renderer; starts the engine's render path only on the first attach. Called
 * from main thread only; mRenderer itself is guarded by mTransportMonitor
 * because webrtc.org threads read it during frame delivery.
 * @return kMediaConduitInvalidRenderer for a null renderer,
 *         kMediaConduitRendererFail if StartRender fails.
 */
MediaConduitErrorCode
WebrtcVideoConduit::AttachRenderer(RefPtr<VideoRenderer> aVideoRenderer)
{
  CSFLogDebug(logTag, "%s ", __FUNCTION__);

  //null renderer
  if(!aVideoRenderer)
  {
    CSFLogError(logTag, "%s NULL Renderer", __FUNCTION__);
    MOZ_ASSERT(false);
    return kMediaConduitInvalidRenderer;
  }

  // This function is called only from main, so we only need to protect against
  // modifying mRenderer while any webrtc.org code is trying to use it.
  bool wasRendering;
  {
    ReentrantMonitorAutoEnter enter(mTransportMonitor);
    wasRendering = !!mRenderer;
    mRenderer = aVideoRenderer;
    // Make sure the renderer knows the resolution
    mRenderer->FrameSizeChange(mReceivingWidth,
                               mReceivingHeight,
                               mNumReceivingStreams);
  }

  if (!wasRendering) {
    if(mPtrViERender->StartRender(mChannel) == -1)
    {
      CSFLogError(logTag, "%s Starting the Renderer Failed %d ", __FUNCTION__,
                  mPtrViEBase->LastError());
      // Roll back: drop the renderer we just installed.
      ReentrantMonitorAutoEnter enter(mTransportMonitor);
      mRenderer = nullptr;
      return kMediaConduitRendererFail;
    }
  }

  return kMediaConduitNoError;
}
+
+void
+WebrtcVideoConduit::DetachRenderer()
+{
+ {
+ ReentrantMonitorAutoEnter enter(mTransportMonitor);
+ if(mRenderer)
+ {
+ mRenderer = nullptr;
+ }
+ }
+
+ mPtrViERender->StopRender(mChannel);
+}
+
/**
 * Installs the transport used for outgoing RTP/RTCP (send side). Guarded by
 * mTransportMonitor since webrtc.org callbacks read it. Always succeeds.
 */
MediaConduitErrorCode
WebrtcVideoConduit::SetTransmitterTransport(RefPtr<TransportInterface> aTransport)
{
  CSFLogDebug(logTag, "%s ", __FUNCTION__);

  ReentrantMonitorAutoEnter enter(mTransportMonitor);
  // set the transport
  mTransmitterTransport = aTransport;
  return kMediaConduitNoError;
}
+
/**
 * Installs the transport used for receive-side RTCP. Guarded by
 * mTransportMonitor since webrtc.org callbacks read it. Always succeeds.
 */
MediaConduitErrorCode
WebrtcVideoConduit::SetReceiverTransport(RefPtr<TransportInterface> aTransport)
{
  CSFLogDebug(logTag, "%s ", __FUNCTION__);

  ReentrantMonitorAutoEnter enter(mTransportMonitor);
  // set the transport
  mReceiverTransport = aTransport;
  return kMediaConduitNoError;
}
/**
 * Records the codec mode (e.g. realtime vs screensharing); it is applied to
 * the encoder on the next ConfigureSendMediaCodec(). Always succeeds.
 */
MediaConduitErrorCode
WebrtcVideoConduit::ConfigureCodecMode(webrtc::VideoCodecMode mode)
{
  CSFLogDebug(logTag, "%s ", __FUNCTION__);
  mCodecMode = mode;
  return kMediaConduitNoError;
}
/**
 * Configures the encoder for sending with |codecConfig|: stops transmission,
 * resolves the codec (external H.264 or one of the engine's built-ins),
 * applies bitrate/FEC/NACK/REMB settings, and caches the config (under
 * mCodecMutex) for SendVideoFrame. The caller restarts transmission.
 *
 * Note: Setting the send-codec on the Video Engine will restart the encoder,
 * sets up new SSRC and reset RTP_RTCP module with the new codec setting.
 *
 * Note: this is called from MainThread, and the codec settings are read on
 * videoframe delivery threads (i.e in SendVideoFrame(). With
 * renegotiation/reconfiguration, this now needs a lock! Alternatively
 * changes could be queued until the next frame is delivered using an
 * Atomic pointer and swaps.
 */
MediaConduitErrorCode
WebrtcVideoConduit::ConfigureSendMediaCodec(const VideoCodecConfig* codecConfig)
{
  CSFLogDebug(logTag, "%s for %s", __FUNCTION__, codecConfig ? codecConfig->mName.c_str() : "<null>");
  bool codecFound = false;
  MediaConduitErrorCode condError = kMediaConduitNoError;
  int error = 0; //webrtc engine errors
  webrtc::VideoCodec video_codec;
  std::string payloadName;

  memset(&video_codec, 0, sizeof(video_codec));

  {
    //validate basic params
    if((condError = ValidateCodecConfig(codecConfig,true)) != kMediaConduitNoError)
    {
      return condError;
    }
  }

  condError = StopTransmitting();
  if (condError != kMediaConduitNoError) {
    return condError;
  }

  if (mRtpStreamIdEnabled) {
    video_codec.ridId = mRtpStreamIdExtId;
  }
  if (mExternalSendCodec &&
      codecConfig->mType == mExternalSendCodec->mType) {
    CSFLogError(logTag, "%s Configuring External H264 Send Codec", __FUNCTION__);

    // width/height will be overridden on the first frame
    video_codec.width = 320;
    video_codec.height = 240;
#ifdef MOZ_WEBRTC_OMX
    if (codecConfig->mType == webrtc::kVideoCodecH264) {
      video_codec.resolution_divisor = 16;
    } else {
      video_codec.resolution_divisor = 1; // We could try using it to handle odd resolutions
    }
#else
    video_codec.resolution_divisor = 1; // We could try using it to handle odd resolutions
#endif
    video_codec.qpMax = 56;
    video_codec.numberOfSimulcastStreams = 1;
    video_codec.simulcastStream[0].jsScaleDownBy =
      codecConfig->mEncodingConstraints.scaleDownBy;
    video_codec.mode = mCodecMode;

    codecFound = true;
  } else {
    // we should be good here to set the new codec.
    for(int idx=0; idx < mPtrViECodec->NumberOfCodecs(); idx++)
    {
      if(0 == mPtrViECodec->GetCodec(idx, video_codec))
      {
        payloadName = video_codec.plName;
        if(codecConfig->mName.compare(payloadName) == 0)
        {
          // Note: side-effect of this is that video_codec is filled in
          // by GetCodec()
          codecFound = true;
          break;
        }
      }
    }//for
  }

  if(codecFound == false)
  {
    CSFLogError(logTag, "%s Codec Mismatch ", __FUNCTION__);
    return kMediaConduitInvalidSendCodec;
  }
  // Note: only for overriding parameters from GetCodec()!
  CodecConfigToWebRTCCodec(codecConfig, video_codec);
  if (mSendingWidth != 0) {
    // We're already in a call and are reconfiguring (perhaps due to
    // ReplaceTrack). Set to match the last frame we sent.

    // We could also set mLastWidth to 0, to force immediate reconfig -
    // more expensive, but perhaps less risk of missing something. Really
    // on ReplaceTrack we should just call ConfigureCodecMode(), and if the
    // mode changed, we re-configure.
    // Do this after CodecConfigToWebRTCCodec() to avoid messing up simulcast
    video_codec.width = mSendingWidth;
    video_codec.height = mSendingHeight;
    video_codec.maxFramerate = mSendingFramerate;
  } else {
    mSendingWidth = 0;
    mSendingHeight = 0;
    mSendingFramerate = video_codec.maxFramerate;
  }

  video_codec.mode = mCodecMode;

  if(mPtrViECodec->SetSendCodec(mChannel, video_codec) == -1)
  {
    error = mPtrViEBase->LastError();
    if(error == kViECodecInvalidCodec)
    {
      CSFLogError(logTag, "%s Invalid Send Codec", __FUNCTION__);
      return kMediaConduitInvalidSendCodec;
    }
    CSFLogError(logTag, "%s SetSendCodec Failed %d ", __FUNCTION__,
                mPtrViEBase->LastError());
    return kMediaConduitUnknownError;
  }

  // Never let the estimator fall below the configured floor.
  if (mMinBitrateEstimate != 0) {
    mPtrViENetwork->SetBitrateConfig(mChannel,
                                     mMinBitrateEstimate,
                                     std::max(video_codec.startBitrate,
                                              mMinBitrateEstimate),
                                     std::max(video_codec.maxBitrate,
                                              mMinBitrateEstimate));
  }

  if (!mVideoCodecStat) {
    mVideoCodecStat = new VideoCodecStatistics(mChannel, mPtrViECodec);
  }
  mVideoCodecStat->Register(true);

  // See Bug 1297058, enabling FEC when NACK is set on H.264 is problematic
  bool use_fec = codecConfig->RtcpFbFECIsSet();
  if ((mExternalSendCodec && codecConfig->mType == mExternalSendCodec->mType)
      || codecConfig->mType == webrtc::kVideoCodecH264) {
    if(codecConfig->RtcpFbNackIsSet("")) {
      use_fec = false;
    }
  }

  if (use_fec)
  {
    uint8_t payload_type_red = INVALID_RTP_PAYLOAD;
    uint8_t payload_type_ulpfec = INVALID_RTP_PAYLOAD;
    if (!DetermineREDAndULPFECPayloadTypes(payload_type_red, payload_type_ulpfec)) {
      CSFLogError(logTag, "%s Unable to set FEC status: could not determine"
                  "payload type: red %u ulpfec %u",
                  __FUNCTION__, payload_type_red, payload_type_ulpfec);
      return kMediaConduitFECStatusError;
    }

    if(codecConfig->RtcpFbNackIsSet("")) {
      CSFLogDebug(logTag, "Enabling NACK/FEC (send) for video stream\n");
      if (mPtrRTP->SetHybridNACKFECStatus(mChannel, true,
                                          payload_type_red,
                                          payload_type_ulpfec) != 0) {
        CSFLogError(logTag, "%s SetHybridNACKFECStatus Failed %d ",
                    __FUNCTION__, mPtrViEBase->LastError());
        return kMediaConduitHybridNACKFECStatusError;
      }
    } else {
      CSFLogDebug(logTag, "Enabling FEC (send) for video stream\n");
      if (mPtrRTP->SetFECStatus(mChannel, true,
                                payload_type_red, payload_type_ulpfec) != 0)
      {
        CSFLogError(logTag, "%s SetFECStatus Failed %d ", __FUNCTION__,
                    mPtrViEBase->LastError());
        return kMediaConduitFECStatusError;
      }
    }
  } else if(codecConfig->RtcpFbNackIsSet("")) {
    CSFLogDebug(logTag, "Enabling NACK (send) for video stream\n");
    if (mPtrRTP->SetNACKStatus(mChannel, true) != 0)
    {
      CSFLogError(logTag, "%s NACKStatus Failed %d ", __FUNCTION__,
                  mPtrViEBase->LastError());
      return kMediaConduitNACKStatusError;
    }
  }

  {
    MutexAutoLock lock(mCodecMutex);

    //Copy the applied config for future reference.
    mCurSendCodecConfig = new VideoCodecConfig(*codecConfig);
  }

  bool remb_requested = codecConfig->RtcpFbRembIsSet();
  mPtrRTP->SetRembStatus(mChannel, true, remb_requested);

  return kMediaConduitNoError;
}
+
/**
 * Configures the receive side with every codec in |codecConfigList|:
 * validates each entry, installs it in the engine, then derives the
 * conduit-wide feedback settings (keyframe request method, NACK, FEC,
 * TMMBR, REMB) from the union of all codecs' capabilities and restarts
 * reception. Succeeds if at least one codec was applied.
 */
MediaConduitErrorCode
WebrtcVideoConduit::ConfigureRecvMediaCodecs(
    const std::vector<VideoCodecConfig* >& codecConfigList)
{
  CSFLogDebug(logTag, "%s ", __FUNCTION__);
  MediaConduitErrorCode condError = kMediaConduitNoError;
  bool success = false;
  std::string payloadName;

  condError = StopReceiving();
  if (condError != kMediaConduitNoError) {
    return condError;
  }

  if(codecConfigList.empty())
  {
    CSFLogError(logTag, "%s Zero number of codecs to configure", __FUNCTION__);
    return kMediaConduitMalformedArgument;
  }

  webrtc::ViEKeyFrameRequestMethod kf_request = webrtc::kViEKeyFrameRequestNone;
  bool use_nack_basic = false;
  bool use_tmmbr = false;
  bool use_remb = false;
  bool use_fec = false;

  //Try Applying the codecs in the list
  // we treat as success if at least one codec was applied and reception was
  // started successfully.
  for(std::vector<VideoCodecConfig*>::size_type i=0;i < codecConfigList.size();i++)
  {
    //if the codec param is invalid or duplicate, return error
    if((condError = ValidateCodecConfig(codecConfigList[i],false)) != kMediaConduitNoError)
    {
      return condError;
    }

    // Check for the keyframe request type: PLI is preferred
    // over FIR, and FIR is preferred over none.
    if (codecConfigList[i]->RtcpFbNackIsSet("pli"))
    {
      kf_request = webrtc::kViEKeyFrameRequestPliRtcp;
    } else if(kf_request == webrtc::kViEKeyFrameRequestNone &&
              codecConfigList[i]->RtcpFbCcmIsSet("fir"))
    {
      kf_request = webrtc::kViEKeyFrameRequestFirRtcp;
    }

    // Check whether NACK is requested
    if(codecConfigList[i]->RtcpFbNackIsSet(""))
    {
      use_nack_basic = true;
    }

    // Check whether TMMBR is requested
    if (codecConfigList[i]->RtcpFbCcmIsSet("tmmbr")) {
      use_tmmbr = true;
    }

    // Check whether REMB is requested
    if (codecConfigList[i]->RtcpFbRembIsSet()) {
      use_remb = true;
    }

    // Check whether FEC is requested
    if (codecConfigList[i]->RtcpFbFECIsSet()) {
      use_fec = true;
    }

    webrtc::VideoCodec video_codec;

    memset(&video_codec, 0, sizeof(webrtc::VideoCodec));

    if (mExternalRecvCodec &&
        codecConfigList[i]->mType == mExternalRecvCodec->mType) {
      CSFLogError(logTag, "%s Configuring External H264 Receive Codec", __FUNCTION__);

      // XXX Do we need a separate setting for receive maxbitrate? Is it
      // different for hardware codecs? For now assume symmetry.
      CodecConfigToWebRTCCodec(codecConfigList[i], video_codec);

      // values SetReceiveCodec() cares about are name, type, maxbitrate
      if(mPtrViECodec->SetReceiveCodec(mChannel,video_codec) == -1)
      {
        CSFLogError(logTag, "%s Invalid Receive Codec %d ", __FUNCTION__,
                    mPtrViEBase->LastError());
      } else {
        CSFLogError(logTag, "%s Successfully Set the codec %s", __FUNCTION__,
                    codecConfigList[i]->mName.c_str());
        success = true;
      }
    } else {
      //Retrieve pre-populated codec structure for our codec.
      for(int idx=0; idx < mPtrViECodec->NumberOfCodecs(); idx++)
      {
        if(mPtrViECodec->GetCodec(idx, video_codec) == 0)
        {
          payloadName = video_codec.plName;
          if(codecConfigList[i]->mName.compare(payloadName) == 0)
          {
            CodecConfigToWebRTCCodec(codecConfigList[i], video_codec);
            if(mPtrViECodec->SetReceiveCodec(mChannel,video_codec) == -1)
            {
              CSFLogError(logTag, "%s Invalid Receive Codec %d ", __FUNCTION__,
                          mPtrViEBase->LastError());
            } else {
              CSFLogError(logTag, "%s Successfully Set the codec %s", __FUNCTION__,
                          codecConfigList[i]->mName.c_str());
              success = true;
            }
            break; //we found a match
          }
        }
      }//end for codeclist
    }
  }//end for

  if(!success)
  {
    CSFLogError(logTag, "%s Setting Receive Codec Failed ", __FUNCTION__);
    return kMediaConduitInvalidReceiveCodec;
  }

  if (!mVideoCodecStat) {
    mVideoCodecStat = new VideoCodecStatistics(mChannel, mPtrViECodec);
  }
  mVideoCodecStat->Register(false);

  // XXX Currently, we gather up all of the feedback types that the remote
  // party indicated it supports for all video codecs and configure the entire
  // conduit based on those capabilities. This is technically out of spec,
  // as these values should be configured on a per-codec basis. However,
  // the video engine only provides this API on a per-conduit basis, so that's
  // how we have to do it. The approach of considering the remote capabilities
  // for the entire conduit to be a union of all remote codec capabilities
  // (rather than the more conservative approach of using an intersection)
  // is made to provide as many feedback mechanisms as are likely to be
  // processed by the remote party (and should be relatively safe, since the
  // remote party is required to ignore feedback types that it does not
  // understand).
  //
  // Note that our configuration uses this union of remote capabilities as
  // input to the configuration. It is not isomorphic to the configuration.
  // For example, it only makes sense to have one frame request mechanism
  // active at a time; so, if the remote party indicates more than one
  // supported mechanism, we're only configuring the one we most prefer.
  //
  // See http://code.google.com/p/webrtc/issues/detail?id=2331

  if (kf_request != webrtc::kViEKeyFrameRequestNone)
  {
    CSFLogDebug(logTag, "Enabling %s frame requests for video stream\n",
                (kf_request == webrtc::kViEKeyFrameRequestPliRtcp ?
                 "PLI" : "FIR"));
    if(mPtrRTP->SetKeyFrameRequestMethod(mChannel, kf_request) != 0)
    {
      CSFLogError(logTag, "%s KeyFrameRequest Failed %d ", __FUNCTION__,
                  mPtrViEBase->LastError());
      return kMediaConduitKeyFrameRequestError;
    }
  }

  // Mirror the engine setting into our own enum for stats reporting.
  switch (kf_request) {
    case webrtc::kViEKeyFrameRequestNone:
      mFrameRequestMethod = FrameRequestNone;
      break;
    case webrtc::kViEKeyFrameRequestPliRtcp:
      mFrameRequestMethod = FrameRequestPli;
      break;
    case webrtc::kViEKeyFrameRequestFirRtcp:
      mFrameRequestMethod = FrameRequestFir;
      break;
    default:
      MOZ_ASSERT(false);
      mFrameRequestMethod = FrameRequestUnknown;
  }

  if (use_fec)
  {
    uint8_t payload_type_red = INVALID_RTP_PAYLOAD;
    uint8_t payload_type_ulpfec = INVALID_RTP_PAYLOAD;
    if (!DetermineREDAndULPFECPayloadTypes(payload_type_red, payload_type_ulpfec)) {
      CSFLogError(logTag, "%s Unable to set FEC status: could not determine"
                  "payload type: red %u ulpfec %u",
                  __FUNCTION__, payload_type_red, payload_type_ulpfec);
      return kMediaConduitFECStatusError;
    }

    // We also need to call SetReceiveCodec for RED and ULPFEC codecs
    for(int idx=0; idx < mPtrViECodec->NumberOfCodecs(); idx++) {
      webrtc::VideoCodec video_codec;
      if(mPtrViECodec->GetCodec(idx, video_codec) == 0) {
        payloadName = video_codec.plName;
        if(video_codec.codecType == webrtc::VideoCodecType::kVideoCodecRED ||
           video_codec.codecType == webrtc::VideoCodecType::kVideoCodecULPFEC) {
          if(mPtrViECodec->SetReceiveCodec(mChannel,video_codec) == -1) {
            CSFLogError(logTag, "%s Invalid Receive Codec %d ", __FUNCTION__,
                        mPtrViEBase->LastError());
          } else {
            CSFLogDebug(logTag, "%s Successfully Set the codec %s", __FUNCTION__,
                        video_codec.plName);
          }
        }
      }
    }

    if (use_nack_basic) {
      CSFLogDebug(logTag, "Enabling NACK/FEC (recv) for video stream\n");
      if (mPtrRTP->SetHybridNACKFECStatus(mChannel, true,
                                          payload_type_red,
                                          payload_type_ulpfec) != 0) {
        CSFLogError(logTag, "%s SetHybridNACKFECStatus Failed %d ",
                    __FUNCTION__, mPtrViEBase->LastError());
        return kMediaConduitNACKStatusError;
      }
    } else {
      CSFLogDebug(logTag, "Enabling FEC (recv) for video stream\n");
      // NOTE(review): this failure path returns kMediaConduitNACKStatusError
      // even though it is an FEC failure; the send path returns
      // kMediaConduitFECStatusError here -- confirm which is intended.
      if (mPtrRTP->SetFECStatus(mChannel, true,
                                payload_type_red, payload_type_ulpfec) != 0)
      {
        CSFLogError(logTag, "%s SetFECStatus Failed %d ", __FUNCTION__,
                    mPtrViEBase->LastError());
        return kMediaConduitNACKStatusError;
      }
    }
  } else if(use_nack_basic) {
    CSFLogDebug(logTag, "Enabling NACK (recv) for video stream\n");
    if (mPtrRTP->SetNACKStatus(mChannel, true) != 0)
    {
      CSFLogError(logTag, "%s NACKStatus Failed %d ", __FUNCTION__,
                  mPtrViEBase->LastError());
      return kMediaConduitNACKStatusError;
    }
  }
  mUsingNackBasic = use_nack_basic;
  mUsingFEC = use_fec;

  if (use_tmmbr) {
    CSFLogDebug(logTag, "Enabling TMMBR for video stream");
    if (mPtrRTP->SetTMMBRStatus(mChannel, true) != 0) {
      CSFLogError(logTag, "%s SetTMMBRStatus Failed %d ", __FUNCTION__,
                  mPtrViEBase->LastError());
      return kMediaConduitTMMBRStatusError;
    }
  }
  mUsingTmmbr = use_tmmbr;

  condError = StartReceiving();
  if (condError != kMediaConduitNoError) {
    return condError;
  }

  // By now we have successfully started reception.
  CSFLogDebug(logTag, "REMB enabled for video stream %s",
              (use_remb ? "yes" : "no"));
  mPtrRTP->SetRembStatus(mChannel, use_remb, true);
  return kMediaConduitNoError;
}
+
+// Returns the smaller of |a| and |b| while treating zero as "unset": if
+// exactly one argument is zero the other is returned; 0 only if both are 0.
+template<typename T>
+T MinIgnoreZero(const T& a, const T& b)
+{
+  return std::min(a? a:b, b? b:a);
+}
+
+// One row of the bitrate-selection table below: a resolution threshold
+// (in 16x16 macroblocks) plus the min/start/max bitrates used at or above it.
+struct ResolutionAndBitrateLimits {
+  uint32_t resolution_in_mb;
+  uint16_t min_bitrate;
+  uint16_t start_bitrate;
+  uint16_t max_bitrate;
+};
+
+// Number of 16x16 macroblocks covering a w x h frame (both axes round up).
+#define MB_OF(w,h) ((unsigned int)((((w+15)>>4))*((unsigned int)((h+15)>>4))))
+
+// For now, try to set the max rates well above the knee in the curve.
+// Chosen somewhat arbitrarily; it's hard to find good data oriented for
+// realtime interactive/talking-head recording. These rates assume
+// 30fps.
+
+// XXX Populate this based on a pref (which we should consider sorting because
+// people won't assume they need to).
+// Rows are ordered largest-resolution first; SelectBitrates picks the first
+// row whose threshold the frame size exceeds.  The final row (threshold 0)
+// is the catch-all for QCIF and below.
+static ResolutionAndBitrateLimits kResolutionAndBitrateLimits[] = {
+  {MB_OF(1920, 1200), 1500, 2000, 10000}, // >HD (3K, 4K, etc)
+  {MB_OF(1280, 720), 1200, 1500, 5000}, // HD ~1080-1200
+  {MB_OF(800, 480), 600, 800, 2500}, // HD ~720
+  {tl::Max<MB_OF(400, 240), MB_OF(352, 288)>::value, 200, 300, 1300}, // VGA, WVGA
+  {MB_OF(176, 144), 100, 150, 500}, // WQVGA, CIF
+  {0 , 40, 80, 250} // QCIF and below
+};
+
+// Chooses min/start/max send bitrates (kbps) for a frame of the given size:
+// picks a row of kResolutionAndBitrateLimits, scales by the current frame
+// rate, then applies the pref-forced mMinBitrate/mStartBitrate/mMaxBitrate
+// overrides.  |cap| (kbps; 0 = uncapped) bounds each table value.  Results
+// are written to out_min/out_start/out_max.
+void
+WebrtcVideoConduit::SelectBitrates(unsigned short width,
+                                   unsigned short height,
+                                   unsigned int cap,
+                                   mozilla::Atomic<int32_t, mozilla::Relaxed>& aLastFramerateTenths,
+                                   unsigned int& out_min,
+                                   unsigned int& out_start,
+                                   unsigned int& out_max)
+{
+  // max bandwidth should be proportional (not linearly!) to resolution, and
+  // proportional (perhaps linearly, or close) to current frame rate.
+  unsigned int fs = MB_OF(width, height);
+
+  for (ResolutionAndBitrateLimits resAndLimits : kResolutionAndBitrateLimits) {
+    if (fs > resAndLimits.resolution_in_mb &&
+        // pick the highest range where at least start rate is within cap
+        // (or if we're at the end of the array).
+        (!cap || resAndLimits.start_bitrate <= cap ||
+         resAndLimits.resolution_in_mb == 0)) {
+      out_min = MinIgnoreZero((unsigned int)resAndLimits.min_bitrate, cap);
+      out_start = MinIgnoreZero((unsigned int)resAndLimits.start_bitrate, cap);
+      out_max = MinIgnoreZero((unsigned int)resAndLimits.max_bitrate, cap);
+      break;
+    }
+  }
+
+  // mLastFramerateTenths is an atomic, and scaled by *10
+  double framerate = std::min((aLastFramerateTenths/10.),60.0);
+  MOZ_ASSERT(framerate > 0);
+  // Now linear reduction/increase based on fps (max 60fps i.e. doubling)
+  if (framerate >= 10) {
+    out_min = out_min * (framerate/30);
+    out_start = out_start * (framerate/30);
+    out_max = std::max((unsigned int)(out_max * (framerate/30)), cap);
+  } else {
+    // At low framerates, don't reduce bandwidth as much - cut slope to 1/2.
+    // Mostly this would be ultra-low-light situations/mobile or screensharing.
+    // NOTE(review): this factor (10 - framerate/2)/30 *increases* as framerate
+    // falls (10/30 at ~0fps vs 1/6 at 10fps) and is discontinuous with the
+    // >=10fps branch (1/3 at 10fps) — confirm the intended half-slope formula.
+    out_min = out_min * ((10-(framerate/2))/30);
+    out_start = out_start * ((10-(framerate/2))/30);
+    out_max = std::max((unsigned int)(out_max * ((10-(framerate/2))/30)), cap);
+  }
+
+  if (mMinBitrate && mMinBitrate > out_min) {
+    out_min = mMinBitrate;
+  }
+  // If we try to set a minimum bitrate that is too low, ViE will reject it.
+  out_min = std::max((unsigned int) webrtc::kViEMinCodecBitrate,
+                     out_min);
+  if (mStartBitrate && mStartBitrate > out_start) {
+    out_start = mStartBitrate;
+  }
+  // start must never be below min.
+  out_start = std::max(out_start, out_min);
+
+  // Note: mMaxBitrate is the max transport bitrate - it applies to a
+  // single codec encoding, but should also apply to the sum of all
+  // simulcast layers in this encoding!
+  // So sum(layers.maxBitrate) <= mMaxBitrate
+  if (mMaxBitrate && mMaxBitrate > out_max) {
+    out_max = mMaxBitrate;
+  }
+}
+
+// Shrinks *width/*height by a common integer divisor so that the resulting
+// pixel area does not exceed |max_fs|, preserving the aspect ratio exactly
+// (both dimensions divided by the same d).  If no common divisor < the
+// smaller dimension satisfies the limit, both outputs are zeroed.
+static void ConstrainPreservingAspectRatioExact(uint32_t max_fs,
+                                                unsigned short* width,
+                                                unsigned short* height)
+{
+  // We could try to pick a better starting divisor, but it won't make any real
+  // performance difference.
+  for (size_t d = 1; d < std::min(*width, *height); ++d) {
+    if ((*width % d) || (*height % d)) {
+      continue; // Not divisible
+    }
+
+    if (((*width) * (*height))/(d*d) <= max_fs) {
+      *width /= d;
+      *height /= d;
+      return;
+    }
+  }
+
+  // No usable divisor found; signal "no valid size" to the caller.
+  *width = 0;
+  *height = 0;
+}
+
+// Scales *width/*height down (never up) to fit within max_width x max_height,
+// approximately preserving the aspect ratio via integer arithmetic.  No-op
+// when the frame already fits.
+static void ConstrainPreservingAspectRatio(uint16_t max_width,
+                                           uint16_t max_height,
+                                           unsigned short* width,
+                                           unsigned short* height)
+{
+  if (((*width) <= max_width) && ((*height) <= max_height)) {
+    return;
+  }
+
+  // Pin whichever dimension is proportionally more over its limit and derive
+  // the other from the original aspect ratio.
+  if ((*width) * max_height > max_width * (*height))
+  {
+    (*height) = max_width * (*height) / (*width);
+    (*width) = max_width;
+  }
+  else
+  {
+    (*width) = max_height * (*width) / (*height);
+    (*height) = max_height;
+  }
+}
+
+// XXX we need to figure out how to feed back changes in preferred capture
+// resolution to the getUserMedia source.
+// Returns boolean if we've submitted an async change (and took ownership
+// of *frame's data)
+//
+// Applies the current send-codec constraints (maxWidth/maxHeight/maxFs) to
+// the incoming frame size, and if the resulting resolution or framerate
+// differs from what we're sending, triggers ReconfigureSendCodec — directly
+// when on main thread, otherwise via a dispatched runnable that may carry a
+// shallow copy of |frame| to (re)submit after the reconfig.
+// Caller must hold mCodecMutex.
+bool
+WebrtcVideoConduit::SelectSendResolution(unsigned short width,
+                                         unsigned short height,
+                                         webrtc::I420VideoFrame *frame) // may be null
+{
+  mCodecMutex.AssertCurrentThreadOwns();
+  // XXX This will do bandwidth-resolution adaptation as well - bug 877954
+
+  mLastWidth = width;
+  mLastHeight = height;
+  // Enforce constraints
+  if (mCurSendCodecConfig) {
+    uint16_t max_width = mCurSendCodecConfig->mEncodingConstraints.maxWidth;
+    uint16_t max_height = mCurSendCodecConfig->mEncodingConstraints.maxHeight;
+    if (max_width || max_height) {
+      max_width = max_width ? max_width : UINT16_MAX;
+      max_height = max_height ? max_height : UINT16_MAX;
+      ConstrainPreservingAspectRatio(max_width, max_height, &width, &height);
+    }
+
+    // Limit resolution to max-fs while keeping same aspect ratio as the
+    // incoming image.
+    if (mCurSendCodecConfig->mEncodingConstraints.maxFs)
+    {
+      uint32_t max_fs = mCurSendCodecConfig->mEncodingConstraints.maxFs;
+      unsigned int cur_fs, mb_width, mb_height, mb_max;
+
+      // Could we make this simpler by picking the larger of width and height,
+      // calculating a max for just that value based on the scale parameter,
+      // and then let ConstrainPreservingAspectRatio do the rest?
+      mb_width = (width + 15) >> 4;
+      mb_height = (height + 15) >> 4;
+
+      cur_fs = mb_width * mb_height;
+
+      // Limit resolution to max_fs, but don't scale up.
+      if (cur_fs > max_fs)
+      {
+        double scale_ratio;
+
+        scale_ratio = sqrt((double) max_fs / (double) cur_fs);
+
+        mb_width = mb_width * scale_ratio;
+        mb_height = mb_height * scale_ratio;
+
+        // Adjust mb_width and mb_height if they were truncated to zero.
+        if (mb_width == 0) {
+          mb_width = 1;
+          mb_height = std::min(mb_height, max_fs);
+        }
+        if (mb_height == 0) {
+          mb_height = 1;
+          mb_width = std::min(mb_width, max_fs);
+        }
+      }
+
+      // Limit width/height seperately to limit effect of extreme aspect ratios.
+      mb_max = (unsigned) sqrt(8 * (double) max_fs);
+
+      max_width = 16 * std::min(mb_width, mb_max);
+      max_height = 16 * std::min(mb_height, mb_max);
+      ConstrainPreservingAspectRatio(max_width, max_height, &width, &height);
+    }
+  }
+
+
+  // Adapt to getUserMedia resolution changes
+  // check if we need to reconfigure the sending resolution.
+  bool changed = false;
+  if (mSendingWidth != width || mSendingHeight != height)
+  {
+    CSFLogDebug(logTag, "%s: resolution changing to %ux%u (from %ux%u)",
+                __FUNCTION__, width, height, mSendingWidth, mSendingHeight);
+    // This will avoid us continually retrying this operation if it fails.
+    // If the resolution changes, we'll try again. In the meantime, we'll
+    // keep using the old size in the encoder.
+    mSendingWidth = width;
+    mSendingHeight = height;
+    changed = true;
+  }
+
+  // uses mSendingWidth/Height
+  unsigned int framerate = SelectSendFrameRate(mSendingFramerate);
+  if (mSendingFramerate != framerate) {
+    CSFLogDebug(logTag, "%s: framerate changing to %u (from %u)",
+                __FUNCTION__, framerate, mSendingFramerate);
+    mSendingFramerate = framerate;
+    changed = true;
+  }
+
+  if (changed) {
+    // On a resolution change, bounce this to the correct thread to
+    // re-configure (same as used for Init(). Do *not* block the calling
+    // thread since that may be the MSG thread.
+
+    // MUST run on the same thread as Init()/etc
+    if (!NS_IsMainThread()) {
+      // Note: on *initial* config (first frame), best would be to drop
+      // frames until the config is done, then encode the most recent frame
+      // provided and continue from there. We don't do this, but we do drop
+      // all frames while in the process of a reconfig and then encode the
+      // frame that started the reconfig, which is close. There may be
+      // barely perceptible glitch in the video due to the dropped frame(s).
+      mInReconfig = true;
+
+      // We can't pass a UniquePtr<> or unique_ptr<> to a lambda directly
+      webrtc::I420VideoFrame *new_frame = nullptr;
+      if (frame) {
+        new_frame = new webrtc::I420VideoFrame();
+        // the internal buffer pointer is refcounted, so we don't have 2 copies here
+        new_frame->ShallowCopy(*frame);
+      }
+      RefPtr<WebrtcVideoConduit> self(this);
+      RefPtr<Runnable> webrtc_runnable =
+        media::NewRunnableFrom([self, width, height, new_frame]() -> nsresult {
+            // Takes ownership of new_frame; freed when the lambda returns.
+            UniquePtr<webrtc::I420VideoFrame> local_frame(new_frame); // Simplify cleanup
+
+            MutexAutoLock lock(self->mCodecMutex);
+            return self->ReconfigureSendCodec(width, height, new_frame);
+          });
+      // new_frame now owned by lambda
+      // NOTE(review): the log text says "WebRTC thread" but the runnable is
+      // dispatched to the main thread (the thread Init() ran on) — confirm.
+      CSFLogDebug(logTag, "%s: proxying lambda to WebRTC thread for reconfig (width %u/%u, height %u/%u",
+                  __FUNCTION__, width, mLastWidth, height, mLastHeight);
+      NS_DispatchToMainThread(webrtc_runnable.forget());
+      if (new_frame) {
+        return true; // queued it
+      }
+    } else {
+      // already on the right thread
+      ReconfigureSendCodec(width, height, frame);
+    }
+  }
+  return false;
+}
+
+// Re-applies the send codec with the new resolution/framerate: fetches the
+// current ViE codec config, updates width/height/framerate and bitrates (for
+// the main stream and every simulcast layer), pushes it back with
+// SetSendCodec, and — if |frame| is non-null — re-submits that frame to the
+// capture input.  Caller must hold mCodecMutex.
+nsresult
+WebrtcVideoConduit::ReconfigureSendCodec(unsigned short width,
+                                         unsigned short height,
+                                         webrtc::I420VideoFrame *frame)
+{
+  mCodecMutex.AssertCurrentThreadOwns();
+
+  // Get current vie codec.
+  webrtc::VideoCodec vie_codec;
+  int32_t err;
+
+  // Clear the flag set by SelectSendResolution so frames flow again.
+  mInReconfig = false;
+  if ((err = mPtrViECodec->GetSendCodec(mChannel, vie_codec)) != 0)
+  {
+    CSFLogError(logTag, "%s: GetSendCodec failed, err %d", __FUNCTION__, err);
+    return NS_ERROR_FAILURE;
+  }
+
+  CSFLogDebug(logTag,
+              "%s: Requesting resolution change to %ux%u (from %ux%u)",
+              __FUNCTION__, width, height, vie_codec.width, vie_codec.height);
+
+  if (mRtpStreamIdEnabled) {
+    vie_codec.ridId = mRtpStreamIdExtId;
+  }
+
+  vie_codec.width = width;
+  vie_codec.height = height;
+  vie_codec.maxFramerate = mSendingFramerate;
+  SelectBitrates(vie_codec.width, vie_codec.height, 0,
+                 mLastFramerateTenths,
+                 vie_codec.minBitrate,
+                 vie_codec.startBitrate,
+                 vie_codec.maxBitrate);
+
+  // These are based on lowest-fidelity, because if there is insufficient
+  // bandwidth for all streams, only the lowest fidelity one will be sent.
+  uint32_t minMinBitrate = 0;
+  uint32_t minStartBitrate = 0;
+  // Total for all simulcast streams.
+  uint32_t totalMaxBitrate = 0;
+
+  // Iterate high-fidelity-first (highest index is the full-resolution layer).
+  for (size_t i = vie_codec.numberOfSimulcastStreams; i > 0; --i) {
+    webrtc::SimulcastStream& stream(vie_codec.simulcastStream[i - 1]);
+    stream.width = width;
+    stream.height = height;
+    MOZ_ASSERT(stream.jsScaleDownBy >= 1.0);
+    uint32_t new_width = uint32_t(width / stream.jsScaleDownBy);
+    uint32_t new_height = uint32_t(height / stream.jsScaleDownBy);
+    // TODO: If two layers are similar, only alloc bits to one (Bug 1249859)
+    if (new_width != width || new_height != height) {
+      if (vie_codec.numberOfSimulcastStreams == 1) {
+        // Use less strict scaling in unicast. That way 320x240 / 3 = 106x79.
+        ConstrainPreservingAspectRatio(new_width, new_height,
+                                       &stream.width, &stream.height);
+      } else {
+        // webrtc.org supposedly won't tolerate simulcast unless every stream
+        // is exactly the same aspect ratio. 320x240 / 3 = 80x60.
+        ConstrainPreservingAspectRatioExact(new_width*new_height,
+                                            &stream.width, &stream.height);
+      }
+    }
+    // Give each layer default appropriate bandwidth limits based on the
+    // resolution/framerate of that layer
+    SelectBitrates(stream.width, stream.height,
+                   MinIgnoreZero(stream.jsMaxBitrate, vie_codec.maxBitrate),
+                   mLastFramerateTenths,
+                   stream.minBitrate,
+                   stream.targetBitrate,
+                   stream.maxBitrate);
+
+    // webrtc.org expects the last, highest fidelity, simulcast stream to
+    // always have the same resolution as vie_codec
+    // Also set the least user-constrained of the stream bitrates on vie_codec.
+    if (i == vie_codec.numberOfSimulcastStreams) {
+      vie_codec.width = stream.width;
+      vie_codec.height = stream.height;
+    }
+    minMinBitrate = MinIgnoreZero(stream.minBitrate, minMinBitrate);
+    minStartBitrate = MinIgnoreZero(stream.targetBitrate, minStartBitrate);
+    totalMaxBitrate += stream.maxBitrate;
+  }
+  if (vie_codec.numberOfSimulcastStreams != 0) {
+    // Clamp the aggregate codec bitrates to the per-layer derived values,
+    // keeping min <= start <= max.
+    vie_codec.minBitrate = std::max(minMinBitrate, vie_codec.minBitrate);
+    vie_codec.maxBitrate = std::min(totalMaxBitrate, vie_codec.maxBitrate);
+    vie_codec.startBitrate = std::max(vie_codec.minBitrate,
+                                      std::min(minStartBitrate,
+                                               vie_codec.maxBitrate));
+  }
+  vie_codec.mode = mCodecMode;
+  if ((err = mPtrViECodec->SetSendCodec(mChannel, vie_codec)) != 0)
+  {
+    CSFLogError(logTag, "%s: SetSendCodec(%ux%u) failed, err %d",
+                __FUNCTION__, width, height, err);
+    return NS_ERROR_FAILURE;
+  }
+  if (mMinBitrateEstimate != 0) {
+    mPtrViENetwork->SetBitrateConfig(mChannel,
+                                     mMinBitrateEstimate,
+                                     std::max(vie_codec.startBitrate,
+                                              mMinBitrateEstimate),
+                                     std::max(vie_codec.maxBitrate,
+                                              mMinBitrateEstimate));
+  }
+
+  CSFLogDebug(logTag, "%s: Encoder resolution changed to %ux%u @ %ufps, bitrate %u:%u",
+              __FUNCTION__, width, height, mSendingFramerate,
+              vie_codec.minBitrate, vie_codec.maxBitrate);
+  if (frame) {
+    // XXX I really don't like doing this from MainThread...
+    mPtrExtCapture->IncomingFrame(*frame);
+    mVideoCodecStat->SentFrame();
+    CSFLogDebug(logTag, "%s Inserted a frame from reconfig lambda", __FUNCTION__);
+  }
+  return NS_OK;
+}
+
+// Invoked under lock of mCodecMutex!
+// Returns the framerate to send at, capped by the codec's max-mbps
+// (macroblocks/second) constraint at the current sending resolution, and by
+// its maxFps constraint.  Returns |framerate| unchanged if no cap applies.
+unsigned int
+WebrtcVideoConduit::SelectSendFrameRate(unsigned int framerate) const
+{
+  mCodecMutex.AssertCurrentThreadOwns();
+  unsigned int new_framerate = framerate;
+
+  // Limit frame rate based on max-mbps
+  if (mCurSendCodecConfig && mCurSendCodecConfig->mEncodingConstraints.maxMbps)
+  {
+    unsigned int cur_fs, mb_width, mb_height, max_fps;
+
+    mb_width = (mSendingWidth + 15) >> 4;
+    mb_height = (mSendingHeight + 15) >> 4;
+
+    cur_fs = mb_width * mb_height;
+    if (cur_fs > 0) { // in case no frames have been sent
+      max_fps = mCurSendCodecConfig->mEncodingConstraints.maxMbps/cur_fs;
+      if (max_fps < mSendingFramerate) {
+        new_framerate = max_fps;
+      }
+
+      // NOTE(review): this maxFps check only runs when maxMbps is non-zero
+      // and at least one frame has been sent — confirm the nesting is
+      // intended rather than an independent constraint.
+      if (mCurSendCodecConfig->mEncodingConstraints.maxFps != 0 &&
+          mCurSendCodecConfig->mEncodingConstraints.maxFps < mSendingFramerate) {
+        new_framerate = mCurSendCodecConfig->mEncodingConstraints.maxFps;
+      }
+    }
+  }
+  return new_framerate;
+}
+
+// Registers an external (out-of-engine) video encoder for this channel's
+// send side; on success (register call returns 0) keeps the handle and a
+// copy of |config|.  Main thread only.
+MediaConduitErrorCode
+WebrtcVideoConduit::SetExternalSendCodec(VideoCodecConfig* config,
+                                         VideoEncoder* encoder) {
+  NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
+  if (!mPtrExtCodec->RegisterExternalSendCodec(mChannel,
+                                              config->mType,
+                                              static_cast<WebrtcVideoEncoder*>(encoder),
+                                              false)) {
+    mExternalSendCodecHandle = encoder;
+    mExternalSendCodec = new VideoCodecConfig(*config);
+    return kMediaConduitNoError;
+  }
+  return kMediaConduitInvalidSendCodec;
+}
+
+// Registers an external video decoder for this channel's receive side;
+// on success keeps the handle and a copy of |config|.  Main thread only.
+MediaConduitErrorCode
+WebrtcVideoConduit::SetExternalRecvCodec(VideoCodecConfig* config,
+                                         VideoDecoder* decoder) {
+  NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
+  if (!mPtrExtCodec->RegisterExternalReceiveCodec(mChannel,
+                                                  config->mType,
+                                                  static_cast<WebrtcVideoDecoder*>(decoder))) {
+    mExternalRecvCodecHandle = decoder;
+    mExternalRecvCodec = new VideoCodecConfig(*config);
+    return kMediaConduitNoError;
+  }
+  return kMediaConduitInvalidReceiveCodec;
+}
+
+// Records whether the RTP stream-id (RID) header extension is in use and its
+// extension id; applied to the codec on the next ReconfigureSendCodec.
+MediaConduitErrorCode
+WebrtcVideoConduit::EnableRTPStreamIdExtension(bool enabled, uint8_t id) {
+  mRtpStreamIdEnabled = enabled;
+  mRtpStreamIdExtId = id;
+  return kMediaConduitNoError;
+}
+
+// Validates the raw I420 buffer and metadata, wraps it in a
+// webrtc::I420VideoFrame (capture_time reused as both RTP timestamp and
+// render time), and forwards to the frame-based SendVideoFrame overload.
+MediaConduitErrorCode
+WebrtcVideoConduit::SendVideoFrame(unsigned char* video_frame,
+                                   unsigned int video_frame_length,
+                                   unsigned short width,
+                                   unsigned short height,
+                                   VideoType video_type,
+                                   uint64_t capture_time)
+{
+
+  //check for the parameters sanity
+  if(!video_frame || video_frame_length == 0 ||
+     width == 0 || height == 0)
+  {
+    CSFLogError(logTag, "%s Invalid Parameters ",__FUNCTION__);
+    MOZ_ASSERT(false);
+    return kMediaConduitMalformedArgument;
+  }
+  MOZ_ASSERT(video_type == VideoType::kVideoI420);
+  MOZ_ASSERT(mPtrExtCapture);
+
+  // Transmission should be enabled before we insert any frames.
+  if(!mEngineTransmitting)
+  {
+    CSFLogError(logTag, "%s Engine not transmitting ", __FUNCTION__);
+    return kMediaConduitSessionNotInited;
+  }
+
+  // insert the frame to video engine in I420 format only
+  webrtc::I420VideoFrame i420_frame;
+  i420_frame.CreateFrame(video_frame, width, height, webrtc::kVideoRotation_0);
+  i420_frame.set_timestamp(capture_time);
+  i420_frame.set_render_time_ms(capture_time);
+
+  return SendVideoFrame(i420_frame);
+}
+
+// Submits a frame to the engine's external capture input.  If the frame size
+// changed since the last frame, runs SelectSendResolution first; frames are
+// silently dropped (returning success) while a codec reconfig is in flight
+// or once the frame has been queued by the reconfig path.
+MediaConduitErrorCode
+WebrtcVideoConduit::SendVideoFrame(webrtc::I420VideoFrame& frame)
+{
+  CSFLogDebug(logTag, "%s ", __FUNCTION__);
+  // See if we need to recalculate what we're sending.
+  // Don't compare mSendingWidth/Height, since those may not be the same as the input.
+  {
+    MutexAutoLock lock(mCodecMutex);
+    if (mInReconfig) {
+      // Waiting for it to finish
+      return kMediaConduitNoError;
+    }
+    if (frame.width() != mLastWidth || frame.height() != mLastHeight) {
+      CSFLogDebug(logTag, "%s: call SelectSendResolution with %ux%u",
+                  __FUNCTION__, frame.width(), frame.height());
+      if (SelectSendResolution(frame.width(), frame.height(), &frame)) {
+        // SelectSendResolution took ownership of the data in i420_frame.
+        // Submit the frame after reconfig is done
+        return kMediaConduitNoError;
+      }
+    }
+  }
+  mPtrExtCapture->IncomingFrame(frame);
+
+  mVideoCodecStat->SentFrame();
+  CSFLogDebug(logTag, "%s Inserted a frame", __FUNCTION__);
+  return kMediaConduitNoError;
+}
+
+// Transport Layer Callbacks
+
+// Hands an incoming RTP packet to the video engine for this channel.
+// Fails with kMediaConduitSessionNotInited unless StartReceiving has run.
+MediaConduitErrorCode
+WebrtcVideoConduit::ReceivedRTPPacket(const void *data, int len)
+{
+  CSFLogDebug(logTag, "%s: seq# %u, Channel %d, Len %d ", __FUNCTION__,
+              (uint16_t) ntohs(((uint16_t*) data)[1]), mChannel, len);
+
+  // Media Engine should be receiving already.
+  if(mEngineReceiving)
+  {
+    // let the engine know of a RTP packet to decode
+    // XXX we need to get passed the time the packet was received
+    if(mPtrViENetwork->ReceivedRTPPacket(mChannel, data, len, webrtc::PacketTime()) == -1)
+    {
+      int error = mPtrViEBase->LastError();
+      CSFLogError(logTag, "%s RTP Processing Failed %d ", __FUNCTION__, error);
+      // Distinguish channel/RTP-level errors from generic module errors.
+      if(error >= kViERtpRtcpInvalidChannelId && error <= kViERtpRtcpRtcpDisabled)
+      {
+        return kMediaConduitRTPProcessingFailed;
+      }
+      return kMediaConduitRTPRTCPModuleError;
+    }
+  } else {
+    CSFLogError(logTag, "Error: %s when not receiving", __FUNCTION__);
+    return kMediaConduitSessionNotInited;
+  }
+
+  return kMediaConduitNoError;
+}
+
+// Hands an incoming RTCP packet to the video engine.  Unlike RTP, this is
+// forwarded regardless of the mEngineReceiving state.
+MediaConduitErrorCode
+WebrtcVideoConduit::ReceivedRTCPPacket(const void *data, int len)
+{
+  CSFLogDebug(logTag, " %s Channel %d, Len %d ", __FUNCTION__, mChannel, len);
+
+  //Media Engine should be receiving already
+  if(mPtrViENetwork->ReceivedRTCPPacket(mChannel,data,len) == -1)
+  {
+    int error = mPtrViEBase->LastError();
+    CSFLogError(logTag, "%s RTCP Processing Failed %d", __FUNCTION__, error);
+    if(error >= kViERtpRtcpInvalidChannelId && error <= kViERtpRtcpRtcpDisabled)
+    {
+      return kMediaConduitRTPProcessingFailed;
+    }
+    return kMediaConduitRTPRTCPModuleError;
+  }
+  return kMediaConduitNoError;
+}
+
+// Stops the engine's send side for this channel.  Idempotent: a no-op when
+// not currently transmitting.
+MediaConduitErrorCode
+WebrtcVideoConduit::StopTransmitting()
+{
+  if(mEngineTransmitting)
+  {
+    CSFLogDebug(logTag, "%s Engine Already Sending. Attemping to Stop ", __FUNCTION__);
+    if(mPtrViEBase->StopSend(mChannel) == -1)
+    {
+      CSFLogError(logTag, "%s StopSend() Failed %d ",__FUNCTION__,
+                  mPtrViEBase->LastError());
+      return kMediaConduitUnknownError;
+    }
+
+    mEngineTransmitting = false;
+  }
+
+  return kMediaConduitNoError;
+}
+
+// Starts the engine's send side for this channel.  Idempotent: a no-op when
+// already transmitting.
+MediaConduitErrorCode
+WebrtcVideoConduit::StartTransmitting()
+{
+  if (!mEngineTransmitting) {
+    if(mPtrViEBase->StartSend(mChannel) == -1)
+    {
+      CSFLogError(logTag, "%s Start Send Error %d ", __FUNCTION__,
+                  mPtrViEBase->LastError());
+      return kMediaConduitUnknownError;
+    }
+
+    mEngineTransmitting = true;
+  }
+
+  return kMediaConduitNoError;
+}
+
+// Stops the engine's receive side for this channel (idempotent).  A
+// kViEBaseUnknownError from StopReceive is tolerated as success.
+// Main thread only.
+MediaConduitErrorCode
+WebrtcVideoConduit::StopReceiving()
+{
+  NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
+  // Are we receiving already? If so, stop receiving and playout
+  // since we can't apply new recv codec when the engine is playing.
+  if(mEngineReceiving)
+  {
+    CSFLogDebug(logTag, "%s Engine Already Receiving . Attemping to Stop ", __FUNCTION__);
+    if(mPtrViEBase->StopReceive(mChannel) == -1)
+    {
+      int error = mPtrViEBase->LastError();
+      if(error == kViEBaseUnknownError)
+      {
+        CSFLogDebug(logTag, "%s StopReceive() Success ", __FUNCTION__);
+      } else {
+        CSFLogError(logTag, "%s StopReceive() Failed %d ", __FUNCTION__,
+                    mPtrViEBase->LastError());
+        return kMediaConduitUnknownError;
+      }
+    }
+    mEngineReceiving = false;
+  }
+
+  return kMediaConduitNoError;
+}
+
+// Starts the engine's receive side for this channel (idempotent).
+MediaConduitErrorCode
+WebrtcVideoConduit::StartReceiving()
+{
+  if (!mEngineReceiving) {
+    CSFLogDebug(logTag, "%s Attemping to start... ", __FUNCTION__);
+    //Start Receive on the video engine
+    if(mPtrViEBase->StartReceive(mChannel) == -1)
+    {
+      int error = mPtrViEBase->LastError();
+      CSFLogError(logTag, "%s Start Receive Error %d ", __FUNCTION__, error);
+
+      return kMediaConduitUnknownError;
+    }
+
+    mEngineReceiving = true;
+  }
+
+  return kMediaConduitNoError;
+}
+
+//WebRTC::RTP Callback Implementation
+// Called on MSG thread
+// Engine callback: pushes an outbound RTP packet to the registered transmit
+// transport.  Returns the packet length on success, -1 on failure.
+int WebrtcVideoConduit::SendPacket(int channel, const void* data, size_t len)
+{
+  CSFLogDebug(logTag, "%s : channel %d len %lu", __FUNCTION__, channel, (unsigned long) len);
+
+  ReentrantMonitorAutoEnter enter(mTransportMonitor);
+  if(mTransmitterTransport &&
+     (mTransmitterTransport->SendRtpPacket(data, len) == NS_OK))
+  {
+    CSFLogDebug(logTag, "%s Sent RTP Packet ", __FUNCTION__);
+    return len;
+  } else {
+    CSFLogError(logTag, "%s RTP Packet Send Failed ", __FUNCTION__);
+    return -1;
+  }
+}
+
+// Called from multiple threads including webrtc Process thread
+// Engine callback: pushes an outbound RTCP packet, trying the receiver
+// transport first and falling back to the transmitter transport.
+int WebrtcVideoConduit::SendRTCPPacket(int channel, const void* data, size_t len)
+{
+  CSFLogDebug(logTag, "%s : channel %d , len %lu ", __FUNCTION__, channel, (unsigned long) len);
+
+  // We come here if we have only one pipeline/conduit setup,
+  // such as for unidirectional streams.
+  // We also end up here if we are receiving
+  ReentrantMonitorAutoEnter enter(mTransportMonitor);
+  if(mReceiverTransport &&
+     mReceiverTransport->SendRtcpPacket(data, len) == NS_OK)
+  {
+    // Might be a sender report, might be a receiver report, we don't know.
+    CSFLogDebug(logTag, "%s Sent RTCP Packet ", __FUNCTION__);
+    return len;
+  } else if(mTransmitterTransport &&
+            (mTransmitterTransport->SendRtcpPacket(data, len) == NS_OK)) {
+    CSFLogDebug(logTag, "%s Sent RTCP Packet (sender report) ", __FUNCTION__);
+    return len;
+  } else {
+    CSFLogError(logTag, "%s RTCP Packet Send Failed ", __FUNCTION__);
+    return -1;
+  }
+}
+
+// WebRTC::ExternalMedia Implementation
+
+// Engine callback: records the new incoming-stream dimensions/stream count
+// and forwards the change to the renderer.  Returns 0 on success, -1 if no
+// renderer is attached.
+int
+WebrtcVideoConduit::FrameSizeChange(unsigned int width,
+                                    unsigned int height,
+                                    unsigned int numStreams)
+{
+  CSFLogDebug(logTag, "%s ", __FUNCTION__);
+
+
+  ReentrantMonitorAutoEnter enter(mTransportMonitor);
+  mReceivingWidth = width;
+  mReceivingHeight = height;
+  mNumReceivingStreams = numStreams;
+
+  if(mRenderer)
+  {
+    mRenderer->FrameSizeChange(width, height, numStreams);
+    return 0;
+  }
+
+  CSFLogError(logTag, "%s Renderer is NULL ", __FUNCTION__);
+  return -1;
+}
+
+// Engine callback: delivers a decoded frame, deriving the strides from the
+// current receiving width (chroma stride = (width+1)/2) and delegating to
+// the stride-taking overload.
+int
+WebrtcVideoConduit::DeliverFrame(unsigned char* buffer,
+                                 size_t buffer_size,
+                                 uint32_t time_stamp,
+                                 int64_t ntp_time_ms,
+                                 int64_t render_time,
+                                 void *handle)
+{
+  return DeliverFrame(buffer, buffer_size, mReceivingWidth, (mReceivingWidth+1)>>1,
+                      time_stamp, ntp_time_ms, render_time, handle);
+}
+
+// Delivers a decoded frame (with explicit strides) to the renderer.  If
+// |handle| is set it carries a webrtc::NativeHandle wrapping a layers::Image.
+// Optionally decodes a YuvStamper latency timestamp out of the frame for the
+// video-latency test.  Returns 0 on success, -1 if no renderer is attached.
+int
+WebrtcVideoConduit::DeliverFrame(unsigned char* buffer,
+                                 size_t buffer_size,
+                                 uint32_t y_stride,
+                                 uint32_t cbcr_stride,
+                                 uint32_t time_stamp,
+                                 int64_t ntp_time_ms,
+                                 int64_t render_time,
+                                 void *handle)
+{
+  CSFLogDebug(logTag, "%s Buffer Size %lu", __FUNCTION__, (unsigned long) buffer_size);
+
+  ReentrantMonitorAutoEnter enter(mTransportMonitor);
+  if(mRenderer)
+  {
+    layers::Image* img = nullptr;
+    // |handle| should be a webrtc::NativeHandle if available.
+    if (handle) {
+      webrtc::NativeHandle* native_h = static_cast<webrtc::NativeHandle*>(handle);
+      // In the handle, there should be a layers::Image.
+      img = static_cast<layers::Image*>(native_h->GetHandle());
+    }
+
+    if (mVideoLatencyTestEnable && mReceivingWidth && mReceivingHeight) {
+      uint64_t now = PR_Now();
+      uint64_t timestamp = 0;
+      // The sender stamped its send time into the pixels; decode it and
+      // update the running latency average.
+      bool ok = YuvStamper::Decode(mReceivingWidth, mReceivingHeight, mReceivingWidth,
+                                   buffer,
+                                   reinterpret_cast<unsigned char*>(&timestamp),
+                                   sizeof(timestamp), 0, 0);
+      if (ok) {
+        VideoLatencyUpdate(now - timestamp);
+      }
+    }
+
+    const ImageHandle img_h(img);
+    mRenderer->RenderVideoFrame(buffer, buffer_size, y_stride, cbcr_stride,
+                                time_stamp, render_time, img_h);
+    return 0;
+  }
+
+  CSFLogError(logTag, "%s Renderer is NULL ", __FUNCTION__);
+  return -1;
+}
+
+// Delivers a decoded webrtc::I420VideoFrame.  Frames without a native handle
+// are unwrapped to their raw Y-plane buffer and routed through the buffer
+// DeliverFrame overload; frames with a native handle (e.g. GPU textures) are
+// rendered via the wrapped layers::Image with a null buffer.
+int
+WebrtcVideoConduit::DeliverI420Frame(const webrtc::I420VideoFrame& webrtc_frame)
+{
+  if (!webrtc_frame.native_handle()) {
+    uint32_t y_stride = webrtc_frame.stride(static_cast<webrtc::PlaneType>(0));
+    return DeliverFrame(const_cast<uint8_t*>(webrtc_frame.buffer(webrtc::kYPlane)),
+                        CalcBufferSize(webrtc::kI420, y_stride, webrtc_frame.height()),
+                        y_stride,
+                        webrtc_frame.stride(static_cast<webrtc::PlaneType>(1)),
+                        webrtc_frame.timestamp(),
+                        webrtc_frame.ntp_time_ms(),
+                        webrtc_frame.render_time_ms(), nullptr);
+  }
+  size_t buffer_size = CalcBufferSize(webrtc::kI420, webrtc_frame.width(), webrtc_frame.height());
+  CSFLogDebug(logTag, "%s Buffer Size %lu", __FUNCTION__, (unsigned long) buffer_size);
+
+  ReentrantMonitorAutoEnter enter(mTransportMonitor);
+  if(mRenderer)
+  {
+    layers::Image* img = nullptr;
+    // |handle| should be a webrtc::NativeHandle if available.
+    webrtc::NativeHandle* native_h = static_cast<webrtc::NativeHandle*>(webrtc_frame.native_handle());
+    if (native_h) {
+      // In the handle, there should be a layers::Image.
+      img = static_cast<layers::Image*>(native_h->GetHandle());
+    }
+
+// Latency stamping disabled for native-handle frames: there is no CPU
+// pixel buffer to decode the stamp from.
+#if 0
+    //#ifndef MOZ_WEBRTC_OMX
+    // XXX - this may not be possible on GONK with textures!
+    if (mVideoLatencyTestEnable && mReceivingWidth && mReceivingHeight) {
+      uint64_t now = PR_Now();
+      uint64_t timestamp = 0;
+      bool ok = YuvStamper::Decode(mReceivingWidth, mReceivingHeight, mReceivingWidth,
+                                   buffer,
+                                   reinterpret_cast<unsigned char*>(&timestamp),
+                                   sizeof(timestamp), 0, 0);
+      if (ok) {
+        VideoLatencyUpdate(now - timestamp);
+      }
+    }
+#endif
+
+    const ImageHandle img_h(img);
+    mRenderer->RenderVideoFrame(nullptr, buffer_size, webrtc_frame.timestamp(),
+                                webrtc_frame.render_time_ms(), img_h);
+    return 0;
+  }
+
+  CSFLogError(logTag, "%s Renderer is NULL ", __FUNCTION__);
+  return -1;
+}
+
+/**
+ * Copy the codec passed into Conduit's database
+ *
+ * Translates a VideoCodecConfig into the webrtc::VideoCodec |cinst|: payload
+ * type/name, framerate cap, pref-forced bitrate defaults, H.264-specific
+ * fields, and the simulcast stream array (reversed so fidelity increases
+ * with index, as webrtc.org expects).
+ */
+
+void
+WebrtcVideoConduit::CodecConfigToWebRTCCodec(const VideoCodecConfig* codecInfo,
+                                             webrtc::VideoCodec& cinst)
+{
+  // Note: this assumes cinst is initialized to a base state either by
+  // hand or from a config fetched with GetConfig(); this modifies the config
+  // to match parameters from VideoCodecConfig
+  cinst.plType = codecInfo->mType;
+  if (codecInfo->mName == "H264") {
+    cinst.codecType = webrtc::kVideoCodecH264;
+    PL_strncpyz(cinst.plName, "H264", sizeof(cinst.plName));
+  } else if (codecInfo->mName == "VP8") {
+    cinst.codecType = webrtc::kVideoCodecVP8;
+    PL_strncpyz(cinst.plName, "VP8", sizeof(cinst.plName));
+  } else if (codecInfo->mName == "VP9") {
+    cinst.codecType = webrtc::kVideoCodecVP9;
+    PL_strncpyz(cinst.plName, "VP9", sizeof(cinst.plName));
+  } else if (codecInfo->mName == "I420") {
+    cinst.codecType = webrtc::kVideoCodecI420;
+    PL_strncpyz(cinst.plName, "I420", sizeof(cinst.plName));
+  } else {
+    cinst.codecType = webrtc::kVideoCodecUnknown;
+    PL_strncpyz(cinst.plName, "Unknown", sizeof(cinst.plName));
+  }
+
+  // width/height will be overridden on the first frame; they must be 'sane' for
+  // SetSendCodec()
+  if (codecInfo->mEncodingConstraints.maxFps > 0) {
+    cinst.maxFramerate = codecInfo->mEncodingConstraints.maxFps;
+  } else {
+    cinst.maxFramerate = DEFAULT_VIDEO_MAX_FRAMERATE;
+  }
+
+  // Defaults if rates aren't forced by pref. Typically defaults are
+  // overridden on the first video frame.
+  cinst.minBitrate = mMinBitrate ? mMinBitrate : 200;
+  cinst.startBitrate = mStartBitrate ? mStartBitrate : 300;
+  cinst.targetBitrate = cinst.startBitrate;
+  cinst.maxBitrate = mMaxBitrate ? mMaxBitrate : 2000;
+
+  if (cinst.codecType == webrtc::kVideoCodecH264)
+  {
+#ifdef MOZ_WEBRTC_OMX
+    cinst.resolution_divisor = 16;
+#endif
+    // cinst.codecSpecific.H264.profile = ?
+    cinst.codecSpecific.H264.profile_byte = codecInfo->mProfile;
+    cinst.codecSpecific.H264.constraints = codecInfo->mConstraints;
+    cinst.codecSpecific.H264.level = codecInfo->mLevel;
+    cinst.codecSpecific.H264.packetizationMode = codecInfo->mPacketizationMode;
+    if (codecInfo->mEncodingConstraints.maxBr > 0) {
+      // webrtc.org uses kbps, we use bps
+      cinst.maxBitrate =
+        MinIgnoreZero(cinst.maxBitrate,
+                      codecInfo->mEncodingConstraints.maxBr)/1000;
+    }
+    if (codecInfo->mEncodingConstraints.maxMbps > 0) {
+      // Not supported yet!
+      CSFLogError(logTag, "%s H.264 max_mbps not supported yet ", __FUNCTION__);
+    }
+    // XXX parse the encoded SPS/PPS data
+    // paranoia
+    cinst.codecSpecific.H264.spsData = nullptr;
+    cinst.codecSpecific.H264.spsLen = 0;
+    cinst.codecSpecific.H264.ppsData = nullptr;
+    cinst.codecSpecific.H264.ppsLen = 0;
+  }
+  // Init mSimulcastEncodings always since they hold info from setParameters.
+  // TODO(bug 1210175): H264 doesn't support simulcast yet.
+  size_t numberOfSimulcastEncodings = std::min(codecInfo->mSimulcastEncodings.size(), (size_t)webrtc::kMaxSimulcastStreams);
+  for (size_t i = 0; i < numberOfSimulcastEncodings; ++i) {
+    const VideoCodecConfig::SimulcastEncoding& encoding =
+      codecInfo->mSimulcastEncodings[i];
+    // Make sure the constraints on the whole stream are reflected.
+    webrtc::SimulcastStream stream;
+    memset(&stream, 0, sizeof(stream));
+    stream.width = cinst.width;
+    stream.height = cinst.height;
+    stream.numberOfTemporalLayers = 1;
+    stream.maxBitrate = cinst.maxBitrate;
+    stream.targetBitrate = cinst.targetBitrate;
+    stream.minBitrate = cinst.minBitrate;
+    stream.qpMax = cinst.qpMax;
+    // Copy the RID with guaranteed NUL termination.
+    strncpy(stream.rid, encoding.rid.c_str(), sizeof(stream.rid)-1);
+    stream.rid[sizeof(stream.rid) - 1] = 0;
+
+    // Apply encoding-specific constraints.
+    stream.width = MinIgnoreZero(
+        stream.width,
+        (unsigned short)encoding.constraints.maxWidth);
+    stream.height = MinIgnoreZero(
+        stream.height,
+        (unsigned short)encoding.constraints.maxHeight);
+
+    // webrtc.org uses kbps, we use bps
+    stream.jsMaxBitrate = encoding.constraints.maxBr/1000;
+    stream.jsScaleDownBy = encoding.constraints.scaleDownBy;
+
+    MOZ_ASSERT(stream.jsScaleDownBy >= 1.0);
+    uint32_t width = stream.width? stream.width : 640;
+    uint32_t height = stream.height? stream.height : 480;
+    uint32_t new_width = uint32_t(width / stream.jsScaleDownBy);
+    uint32_t new_height = uint32_t(height / stream.jsScaleDownBy);
+
+    if (new_width != width || new_height != height) {
+      // Estimate. Overridden on first frame.
+      SelectBitrates(new_width, new_height, stream.jsMaxBitrate,
+                     mLastFramerateTenths,
+                     stream.minBitrate,
+                     stream.targetBitrate,
+                     stream.maxBitrate);
+    }
+    // webrtc.org expects simulcast streams to be ordered by increasing
+    // fidelity, our jsep code does the opposite.
+    cinst.simulcastStream[numberOfSimulcastEncodings-i-1] = stream;
+  }
+
+  cinst.numberOfSimulcastStreams = numberOfSimulcastEncodings;
+}
+
+/**
+ * Perform validation on the codecConfig to be applied
+ * Verifies if the codec is already applied.
+ *
+ * Checks only that the config is non-null and its payload name is a
+ * non-empty string shorter than CODEC_PLNAME_SIZE; |send| is currently
+ * unused in the visible checks.
+ */
+MediaConduitErrorCode
+WebrtcVideoConduit::ValidateCodecConfig(const VideoCodecConfig* codecInfo,
+                                        bool send)
+{
+  if(!codecInfo)
+  {
+    CSFLogError(logTag, "%s Null CodecConfig ", __FUNCTION__);
+    return kMediaConduitMalformedArgument;
+  }
+
+  if((codecInfo->mName.empty()) ||
+     (codecInfo->mName.length() >= CODEC_PLNAME_SIZE))
+  {
+    CSFLogError(logTag, "%s Invalid Payload Name Length ", __FUNCTION__);
+    return kMediaConduitMalformedArgument;
+  }
+
+  return kMediaConduitNoError;
+}
+
+// Folds a new latency sample (microseconds, per PR_Now usage above) into the
+// fixed-point exponential moving average mVideoLatencyAvg.
+void
+WebrtcVideoConduit::VideoLatencyUpdate(uint64_t newSample)
+{
+  mVideoLatencyAvg = (sRoundingPadding * newSample + sAlphaNum * mVideoLatencyAvg) / sAlphaDen;
+}
+
+// Returns the current latency average, unscaled from its fixed-point form.
+uint64_t
+WebrtcVideoConduit::MozVideoLatencyAvg()
+{
+  return mVideoLatencyAvg / sRoundingPadding;
+}
+
+// Returns the plugin ID of whichever external codec (send preferred, then
+// receive) is registered, or 0 when neither is.
+uint64_t
+WebrtcVideoConduit::CodecPluginID()
+{
+  if (mExternalSendCodecHandle) {
+    return mExternalSendCodecHandle->PluginID();
+  } else if (mExternalRecvCodecHandle) {
+    return mExternalRecvCodecHandle->PluginID();
+  }
+  return 0;
+}
+
+// Scans the engine's codec list for the RED and ULPFEC entries and writes
+// their payload types to the out-params (INVALID_RTP_PAYLOAD when absent).
+// Returns true only if both were found.
+bool
+WebrtcVideoConduit::DetermineREDAndULPFECPayloadTypes(uint8_t &payload_type_red, uint8_t &payload_type_ulpfec)
+{
+  webrtc::VideoCodec video_codec;
+  payload_type_red = INVALID_RTP_PAYLOAD;
+  payload_type_ulpfec = INVALID_RTP_PAYLOAD;
+
+  for(int idx=0; idx < mPtrViECodec->NumberOfCodecs(); idx++)
+  {
+    if(mPtrViECodec->GetCodec(idx, video_codec) == 0)
+    {
+      switch(video_codec.codecType) {
+        case webrtc::VideoCodecType::kVideoCodecRED:
+          payload_type_red = video_codec.plType;
+          break;
+        case webrtc::VideoCodecType::kVideoCodecULPFEC:
+          payload_type_ulpfec = video_codec.plType;
+          break;
+        default:
+          break;
+      }
+    }
+  }
+
+  return payload_type_red != INVALID_RTP_PAYLOAD
+      && payload_type_ulpfec != INVALID_RTP_PAYLOAD;
+}
+
+}// end namespace
diff --git a/media/webrtc/signaling/src/media-conduit/VideoConduit.h b/media/webrtc/signaling/src/media-conduit/VideoConduit.h
new file mode 100755
index 000000000..323a6a284
--- /dev/null
+++ b/media/webrtc/signaling/src/media-conduit/VideoConduit.h
@@ -0,0 +1,429 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef VIDEO_SESSION_H_
+#define VIDEO_SESSION_H_
+
+#include "nsAutoPtr.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/Atomics.h"
+
+#include "MediaConduitInterface.h"
+#include "MediaEngineWrapper.h"
+#include "CodecStatistics.h"
+#include "LoadManagerFactory.h"
+#include "LoadManager.h"
+#include "runnable_utils.h"
+
+// conflicts with #include of scoped_ptr.h
+#undef FF
+// Video Engine Includes
+#include "webrtc/common_types.h"
+#ifdef FF
+#undef FF // Avoid name collision between scoped_ptr.h and nsCRTGlue.h.
+#endif
+#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/video_engine/include/vie_base.h"
+#include "webrtc/video_engine/include/vie_capture.h"
+#include "webrtc/video_engine/include/vie_codec.h"
+#include "webrtc/video_engine/include/vie_external_codec.h"
+#include "webrtc/video_engine/include/vie_render.h"
+#include "webrtc/video_engine/include/vie_network.h"
+#include "webrtc/video_engine/include/vie_rtp_rtcp.h"
+
+/** This file hosts several structures identifying different aspects
+ * of a RTP Session.
+ */
+
+ using webrtc::ViEBase;
+ using webrtc::ViENetwork;
+ using webrtc::ViECodec;
+ using webrtc::ViECapture;
+ using webrtc::ViERender;
+ using webrtc::ViEExternalCapture;
+ using webrtc::ViEExternalCodec;
+
+namespace mozilla {
+
+class WebrtcAudioConduit;
+class nsThread;
+
+// Interface of external video encoder for WebRTC.
+// Joins the conduit-facing VideoEncoder interface with webrtc's own
+// VideoEncoder interface, so a single object can be handed to both layers.
+class WebrtcVideoEncoder:public VideoEncoder
+                       ,public webrtc::VideoEncoder
+{};
+
+// Interface of external video decoder for WebRTC.
+// Joins the conduit-facing VideoDecoder interface with webrtc's own
+// VideoDecoder interface, so a single object can be handed to both layers.
+class WebrtcVideoDecoder:public VideoDecoder
+                        ,public webrtc::VideoDecoder
+{};
+
+/**
+ * Concrete class for Video session. Hooks up
+ * - media-source and target to external transport
+ */
+class WebrtcVideoConduit : public VideoSessionConduit
+                         , public webrtc::Transport
+                         , public webrtc::ExternalRenderer
+{
+public:
+  //VoiceEngine defined constant for Payload Name Size.
+  static const unsigned int CODEC_PLNAME_SIZE;
+
+  /**
+   * Set up A/V sync between this (incoming) VideoConduit and an audio conduit.
+   */
+  void SyncTo(WebrtcAudioConduit *aConduit);
+
+  /**
+   * Function to attach Renderer end-point for the Media-Video conduit.
+   * @param aRenderer : Reference to the concrete Video renderer implementation
+   * Note: Multiple invocations of this API shall remove an existing renderer
+   * and attaches the new to the Conduit.
+   */
+  virtual MediaConduitErrorCode AttachRenderer(RefPtr<VideoRenderer> aVideoRenderer) override;
+  virtual void DetachRenderer() override;
+
+  /**
+   * APIs used by the registered external transport to this Conduit to
+   * feed in received RTP Frames to the VideoEngine for decoding
+   */
+  virtual MediaConduitErrorCode ReceivedRTPPacket(const void *data, int len) override;
+
+  /**
+   * APIs used by the registered external transport to this Conduit to
+   * feed in received RTCP Frames to the VideoEngine for decoding
+   */
+  virtual MediaConduitErrorCode ReceivedRTCPPacket(const void *data, int len) override;
+
+  virtual MediaConduitErrorCode StopTransmitting() override;
+  virtual MediaConduitErrorCode StartTransmitting() override;
+  virtual MediaConduitErrorCode StopReceiving() override;
+  virtual MediaConduitErrorCode StartReceiving() override;
+
+  /**
+   * Function to configure sending codec mode for different content
+   */
+  virtual MediaConduitErrorCode ConfigureCodecMode(webrtc::VideoCodecMode) override;
+
+  /**
+   * Function to configure send codec for the video session
+   * @param codecInfo: CodecConfiguration
+   * @result: On Success, the video engine is configured with passed in codec for send
+   *          On failure, video engine transmit functionality is disabled.
+   * NOTE: This API can be invoked multiple times. Invoking this API may involve restarting
+   *        transmission sub-system on the engine.
+   */
+  virtual MediaConduitErrorCode ConfigureSendMediaCodec(const VideoCodecConfig* codecInfo) override;
+
+  /**
+   * Function to configure list of receive codecs for the video session
+   * @param codecConfigList: list of codec configurations to accept
+   * @result: On Success, the video engine is configured with passed in codec for send
+   *          Also the playout is enabled.
+   *          On failure, video engine transmit functionality is disabled.
+   * NOTE: This API can be invoked multiple times. Invoking this API may involve restarting
+   *        transmission sub-system on the engine.
+   */
+  virtual MediaConduitErrorCode ConfigureRecvMediaCodecs(
+      const std::vector<VideoCodecConfig* >& codecConfigList) override;
+
+  /**
+   * Register Transport for this Conduit. RTP and RTCP frames from the VideoEngine
+   * shall be passed to the registered transport for transporting externally.
+   */
+  virtual MediaConduitErrorCode SetTransmitterTransport(RefPtr<TransportInterface> aTransport) override;
+
+  virtual MediaConduitErrorCode SetReceiverTransport(RefPtr<TransportInterface> aTransport) override;
+
+  /**
+   * Function to set the encoding bitrate limits based on incoming frame size and rate
+   * @param width, height: dimensions of the frame
+   * @param cap: user-enforced max bitrate, or 0
+   * @param aLastFramerateTenths: holds the current input framerate
+   * @param out_start, out_min, out_max: bitrate results
+   */
+  void SelectBitrates(unsigned short width,
+                      unsigned short height,
+                      unsigned int cap,
+                      mozilla::Atomic<int32_t, mozilla::Relaxed>& aLastFramerateTenths,
+                      unsigned int& out_min,
+                      unsigned int& out_start,
+                      unsigned int& out_max);
+
+  /**
+   * Function to select and change the encoding resolution based on incoming frame size
+   * and current available bandwidth.
+   * @param width, height: dimensions of the frame
+   * @param frame: optional frame to submit for encoding after reconfig
+   */
+  bool SelectSendResolution(unsigned short width,
+                            unsigned short height,
+                            webrtc::I420VideoFrame *frame);
+
+  /**
+   * Function to reconfigure the current send codec for a different
+   * width/height/framerate/etc.
+   * @param width, height: dimensions of the frame
+   * @param frame: optional frame to submit for encoding after reconfig
+   */
+  nsresult ReconfigureSendCodec(unsigned short width,
+                                unsigned short height,
+                                webrtc::I420VideoFrame *frame);
+
+  /**
+   * Function to select and change the encoding frame rate based on incoming frame rate
+   * and max-mbps setting.
+   * @param current framerate
+   * @result new framerate
+   */
+  unsigned int SelectSendFrameRate(unsigned int framerate) const;
+
+  /**
+   * Function to deliver a capture video frame for encoding and transport
+   * @param video_frame: pointer to captured video-frame.
+   * @param video_frame_length: size of the frame
+   * @param width, height: dimensions of the frame
+   * @param video_type: Type of the video frame - I420, RAW
+   * @param captured_time: timestamp when the frame was captured.
+   *                       if 0 timestamp is automatically generated by the engine.
+   *NOTE: ConfigureSendMediaCodec() SHOULD be called before this function can be invoked
+   *       This ensures the inserted video-frames can be transmitted by the conduit
+   */
+  virtual MediaConduitErrorCode SendVideoFrame(unsigned char* video_frame,
+                                               unsigned int video_frame_length,
+                                               unsigned short width,
+                                               unsigned short height,
+                                               VideoType video_type,
+                                               uint64_t capture_time) override;
+  virtual MediaConduitErrorCode SendVideoFrame(webrtc::I420VideoFrame& frame) override;
+
+  /**
+   * Set an external encoder object |encoder| to the payload type |pltype|
+   * for sender side codec.
+   */
+  virtual MediaConduitErrorCode SetExternalSendCodec(VideoCodecConfig* config,
+                                                     VideoEncoder* encoder) override;
+
+  /**
+   * Set an external decoder object |decoder| to the payload type |pltype|
+   * for receiver side codec.
+   */
+  virtual MediaConduitErrorCode SetExternalRecvCodec(VideoCodecConfig* config,
+                                                     VideoDecoder* decoder) override;
+
+  /**
+   * Enables use of Rtp Stream Id, and sets the extension ID.
+   */
+  virtual MediaConduitErrorCode EnableRTPStreamIdExtension(bool enabled, uint8_t id) override;
+
+  /**
+   * Webrtc transport implementation to send and receive RTP packet.
+   * VideoConduit registers itself as ExternalTransport to the VideoEngine
+   */
+  virtual int SendPacket(int channel, const void *data, size_t len) override;
+
+  /**
+   * Webrtc transport implementation to send and receive RTCP packet.
+   * VideoConduit registers itself as ExternalTransport to the VideoEngine
+   */
+  virtual int SendRTCPPacket(int channel, const void *data, size_t len) override;
+
+
+  /**
+   * Webrtc External Renderer Implementation APIs.
+   * Raw I420 Frames are delivered to the VideoConduit by the VideoEngine
+   */
+  virtual int FrameSizeChange(unsigned int, unsigned int, unsigned int) override;
+
+  virtual int DeliverFrame(unsigned char*, size_t, uint32_t , int64_t,
+                           int64_t, void *handle) override;
+
+  virtual int DeliverFrame(unsigned char*, size_t, uint32_t, uint32_t, uint32_t , int64_t,
+                           int64_t, void *handle);
+
+  virtual int DeliverI420Frame(const webrtc::I420VideoFrame& webrtc_frame) override;
+
+  /**
+   * Does DeliverFrame() support a null buffer and non-null handle
+   * (video texture)?
+   * B2G support it (when using HW video decoder with graphic buffer output).
+   * XXX Investigate! Especially for Android
+   */
+  virtual bool IsTextureSupported() override {
+#ifdef WEBRTC_GONK
+    return true;
+#else
+    return false;
+#endif
+  }
+
+  virtual uint64_t CodecPluginID() override;
+
+  unsigned short SendingWidth() override {
+    return mSendingWidth;
+  }
+
+  unsigned short SendingHeight() override {
+    return mSendingHeight;
+  }
+
+  unsigned int SendingMaxFs() override {
+    if(mCurSendCodecConfig) {
+      return mCurSendCodecConfig->mEncodingConstraints.maxFs;
+    }
+    return 0;
+  }
+
+  unsigned int SendingMaxFr() override {
+    if(mCurSendCodecConfig) {
+      return mCurSendCodecConfig->mEncodingConstraints.maxFps;
+    }
+    return 0;
+  }
+
+  // Construction is completed by Init()/InitMain(); Destroy() tears down
+  // engine state before destruction.
+  WebrtcVideoConduit();
+  virtual ~WebrtcVideoConduit();
+
+  MediaConduitErrorCode InitMain();
+  virtual MediaConduitErrorCode Init();
+  virtual void Destroy();
+
+  int GetChannel() { return mChannel; }
+  webrtc::VideoEngine* GetVideoEngine() { return mVideoEngine; }
+  bool GetLocalSSRC(unsigned int* ssrc) override;
+  bool SetLocalSSRC(unsigned int ssrc) override;
+  bool GetRemoteSSRC(unsigned int* ssrc) override;
+  bool SetLocalCNAME(const char* cname) override;
+  bool GetVideoEncoderStats(double* framerateMean,
+                            double* framerateStdDev,
+                            double* bitrateMean,
+                            double* bitrateStdDev,
+                            uint32_t* droppedFrames) override;
+  bool GetVideoDecoderStats(double* framerateMean,
+                            double* framerateStdDev,
+                            double* bitrateMean,
+                            double* bitrateStdDev,
+                            uint32_t* discardedPackets) override;
+  bool GetAVStats(int32_t* jitterBufferDelayMs,
+                  int32_t* playoutBufferDelayMs,
+                  int32_t* avSyncOffsetMs) override;
+  bool GetRTPStats(unsigned int* jitterMs, unsigned int* cumulativeLost) override;
+  bool GetRTCPReceiverReport(DOMHighResTimeStamp* timestamp,
+                             uint32_t* jitterMs,
+                             uint32_t* packetsReceived,
+                             uint64_t* bytesReceived,
+                             uint32_t* cumulativeLost,
+                             int32_t* rttMs) override;
+  bool GetRTCPSenderReport(DOMHighResTimeStamp* timestamp,
+                           unsigned int* packetsSent,
+                           uint64_t* bytesSent) override;
+  uint64_t MozVideoLatencyAvg();
+
+private:
+  DISALLOW_COPY_AND_ASSIGN(WebrtcVideoConduit);
+
+  // Returns true when the caller is running on |thread|; returns false
+  // (without asserting) when the target thread has already shut down.
+  static inline bool OnThread(nsIEventTarget *thread)
+  {
+    bool on;
+    nsresult rv;
+    rv = thread->IsOnCurrentThread(&on);
+
+    // If the target thread has already shut down, we don't want to assert.
+    if (rv != NS_ERROR_NOT_INITIALIZED) {
+      MOZ_ASSERT(NS_SUCCEEDED(rv));
+    }
+
+    if (NS_WARN_IF(NS_FAILED(rv))) {
+      return false;
+    }
+    return on;
+  }
+
+  //Local database of currently applied receive codecs
+  typedef std::vector<VideoCodecConfig* > RecvCodecList;
+
+  //Function to convert between WebRTC and Conduit codec structures
+  void CodecConfigToWebRTCCodec(const VideoCodecConfig* codecInfo,
+                                webrtc::VideoCodec& cinst);
+
+  //Checks the codec to be applied
+  MediaConduitErrorCode ValidateCodecConfig(const VideoCodecConfig* codecInfo, bool send);
+
+  //Utility function to dump recv codec database
+  void DumpCodecDB() const;
+
+  // Video Latency Test averaging filter
+  void VideoLatencyUpdate(uint64_t new_sample);
+
+  // Utility function to determine RED and ULPFEC payload types
+  bool DetermineREDAndULPFECPayloadTypes(uint8_t &payload_type_red, uint8_t &payload_type_ulpfec);
+
+  webrtc::VideoEngine* mVideoEngine;
+  mozilla::ReentrantMonitor mTransportMonitor;
+  RefPtr<TransportInterface> mTransmitterTransport;
+  RefPtr<TransportInterface> mReceiverTransport;
+  RefPtr<VideoRenderer> mRenderer;
+
+  // Engine sub-interfaces, released via their custom Release() on teardown.
+  ScopedCustomReleasePtr<webrtc::ViEBase> mPtrViEBase;
+  ScopedCustomReleasePtr<webrtc::ViECapture> mPtrViECapture;
+  ScopedCustomReleasePtr<webrtc::ViECodec> mPtrViECodec;
+  ScopedCustomReleasePtr<webrtc::ViENetwork> mPtrViENetwork;
+  ScopedCustomReleasePtr<webrtc::ViERender> mPtrViERender;
+  ScopedCustomReleasePtr<webrtc::ViERTP_RTCP> mPtrRTP;
+  ScopedCustomReleasePtr<webrtc::ViEExternalCodec> mPtrExtCodec;
+
+  // External capture interface (raw pointer, not ref-counted here).
+  webrtc::ViEExternalCapture* mPtrExtCapture;
+
+  // Engine state we are concerned with.
+  mozilla::Atomic<bool> mEngineTransmitting; //If true ==> Transmit Sub-system is up and running
+  mozilla::Atomic<bool> mEngineReceiving;    // if true ==> Receive Sub-system up and running
+
+  int mChannel; // Video Channel for this conduit
+  int mCapId;   // Capturer for this conduit
+
+  Mutex mCodecMutex; // protects mCurrSendCodecConfig
+  nsAutoPtr<VideoCodecConfig> mCurSendCodecConfig;
+  bool mInReconfig;
+
+  // Frame-geometry and framerate bookkeeping for the send and receive
+  // sides, updated as frames flow through the conduit.
+  unsigned short mLastWidth;
+  unsigned short mLastHeight;
+  unsigned short mSendingWidth;
+  unsigned short mSendingHeight;
+  unsigned short mReceivingWidth;
+  unsigned short mReceivingHeight;
+  unsigned int   mSendingFramerate;
+  // scaled by *10 because Atomic<double/float> isn't supported
+  mozilla::Atomic<int32_t, mozilla::Relaxed> mLastFramerateTenths;
+  unsigned short mNumReceivingStreams;
+  bool mVideoLatencyTestEnable;
+  uint64_t mVideoLatencyAvg;
+  uint32_t mMinBitrate;
+  uint32_t mStartBitrate;
+  uint32_t mMaxBitrate;
+  uint32_t mMinBitrateEstimate;
+
+  // RtpStreamId (RID) header-extension state; see EnableRTPStreamIdExtension().
+  bool mRtpStreamIdEnabled;
+  uint8_t mRtpStreamIdExtId;
+
+  // EWMA parameters for the video-latency filter; the average is stored
+  // scaled by sRoundingPadding (see VideoLatencyUpdate/MozVideoLatencyAvg).
+  static const unsigned int sAlphaNum = 7;
+  static const unsigned int sAlphaDen = 8;
+  static const unsigned int sRoundingPadding = 1024;
+
+  RefPtr<WebrtcAudioConduit> mSyncedTo;
+
+  // External (non-built-in) codec configs and their encoder/decoder handles.
+  nsAutoPtr<VideoCodecConfig> mExternalSendCodec;
+  nsAutoPtr<VideoCodecConfig> mExternalRecvCodec;
+  nsAutoPtr<VideoEncoder> mExternalSendCodecHandle;
+  nsAutoPtr<VideoDecoder> mExternalRecvCodecHandle;
+
+  // statistics object for video codec;
+  nsAutoPtr<VideoCodecStatistics> mVideoCodecStat;
+
+  nsAutoPtr<LoadManager> mLoadManager;
+  webrtc::VideoCodecMode mCodecMode;
+};
+} // end namespace
+
+#endif
diff --git a/media/webrtc/signaling/src/media-conduit/VideoTypes.h b/media/webrtc/signaling/src/media-conduit/VideoTypes.h
new file mode 100755
index 000000000..e4cff3962
--- /dev/null
+++ b/media/webrtc/signaling/src/media-conduit/VideoTypes.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2012, The WebRTC project authors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * * Neither the name of Google nor the names of its contributors may
+ * be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VIDEO_TYPE_
+#define VIDEO_TYPE_
+
+namespace mozilla
+{
+/*
+ * Enumeration for different video types supported by the
+ * video-engine. If more types will be supported in the future
+ * newer one shall be appended to the bottom of the list
+ * (existing values must keep their numeric assignments).
+ */
+enum VideoType
+{
+  kVideoI420     = 0,
+  kVideoYV12     = 1,
+  kVideoYUY2     = 2,
+  kVideoUYVY     = 3,
+  kVideoIYUV     = 4,
+  kVideoARGB     = 5,
+  kVideoRGB24    = 6,
+  kVideoRGB565   = 7,
+  kVideoARGB4444 = 8,
+  kVideoARGB1555 = 9,
+  kVideoMJPEG    = 10,
+  kVideoNV12     = 11,
+  kVideoNV21     = 12,
+  kVideoBGRA     = 13,
+  kVideoUnknown  = 99 // sentinel for unrecognized formats
+};
+} // namespace mozilla
+#endif
diff --git a/media/webrtc/signaling/src/media-conduit/WebrtcGmpVideoCodec.cpp b/media/webrtc/signaling/src/media-conduit/WebrtcGmpVideoCodec.cpp
new file mode 100644
index 000000000..ad47e5316
--- /dev/null
+++ b/media/webrtc/signaling/src/media-conduit/WebrtcGmpVideoCodec.cpp
@@ -0,0 +1,965 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "WebrtcGmpVideoCodec.h"
+
+#include <iostream>
+#include <vector>
+
+#include "mozilla/Move.h"
+#include "mozilla/SyncRunnable.h"
+#include "VideoConduit.h"
+#include "AudioConduit.h"
+#include "runnable_utils.h"
+
+#include "mozIGeckoMediaPluginService.h"
+#include "nsServiceManagerUtils.h"
+#include "GMPVideoDecoderProxy.h"
+#include "GMPVideoEncoderProxy.h"
+#include "MainThreadUtils.h"
+
+#include "gmp-video-host.h"
+#include "gmp-video-frame-i420.h"
+#include "gmp-video-frame-encoded.h"
+
+#include "webrtc/video_engine/include/vie_external_codec.h"
+
+namespace mozilla {
+
+#ifdef LOG
+#undef LOG
+#endif
+
+#ifdef MOZILLA_INTERNAL_API
+extern mozilla::LogModule* GetGMPLog();
+#else
+// For CPP unit tests
+// Lazily create (on first call) and return the "GMP" log module.
+PRLogModuleInfo*
+GetGMPLog()
+{
+  static PRLogModuleInfo *sLog;
+  if (!sLog)
+    sLog = PR_NewLogModule("GMP");
+  return sLog;
+}
+#endif
+#define LOGD(msg) MOZ_LOG(GetGMPLog(), mozilla::LogLevel::Debug, msg)
+#define LOG(level, msg) MOZ_LOG(GetGMPLog(), (level), msg)
+
+// RAII helper: publishes |aPCHandle| in a main-thread-only static so that
+// codecs constructed further down the call stack can pick up their
+// PeerConnection handle. Debug-asserts that no handle is currently set,
+// then installs the new one. A no-op (after asserting) off main thread.
+WebrtcGmpPCHandleSetter::WebrtcGmpPCHandleSetter(const std::string& aPCHandle)
+{
+  if (!NS_IsMainThread()) {
+    MOZ_ASSERT(false, "WebrtcGmpPCHandleSetter can only be used on main");
+    return;
+  }
+  MOZ_ASSERT(sCurrentHandle.empty());
+  sCurrentHandle = aPCHandle;
+}
+
+// Clears the published handle when the setter goes out of scope.
+// A no-op (after asserting) off main thread.
+WebrtcGmpPCHandleSetter::~WebrtcGmpPCHandleSetter()
+{
+  if (!NS_IsMainThread()) {
+    MOZ_ASSERT(false, "WebrtcGmpPCHandleSetter can only be used on main");
+    return;
+  }
+
+  sCurrentHandle.clear();
+}
+
+// Returns the handle published by the innermost live WebrtcGmpPCHandleSetter,
+// or "" when called off main thread (after asserting).
+/* static */ std::string
+WebrtcGmpPCHandleSetter::GetCurrentHandle()
+{
+  if (!NS_IsMainThread()) {
+    MOZ_ASSERT(false, "WebrtcGmpPCHandleSetter can only be used on main");
+    return "";
+  }
+
+  return sCurrentHandle;
+}
+
+std::string WebrtcGmpPCHandleSetter::sCurrentHandle = "";
+
+// Encoder.
+// Encoder.
+// The GMP plugin is not acquired here; it is obtained asynchronously in
+// InitEncode(). In internal builds the PeerConnection handle is captured
+// from the ambient WebrtcGmpPCHandleSetter.
+WebrtcGmpVideoEncoder::WebrtcGmpVideoEncoder()
+  : mGMP(nullptr)
+  , mInitting(false)
+  , mHost(nullptr)
+  , mMaxPayloadSize(0)
+  , mCallbackMutex("WebrtcGmpVideoEncoder encoded callback mutex")
+  , mCallback(nullptr)
+  , mCachedPluginId(0)
+{
+#ifdef MOZILLA_INTERNAL_API
+  if (mPCHandle.empty()) {
+    mPCHandle = WebrtcGmpPCHandleSetter::GetCurrentHandle();
+  }
+  MOZ_ASSERT(!mPCHandle.empty());
+#endif
+}
+
+// Callers must have closed the GMP plugin (ReleaseGmp()/Close_g()) before
+// the last reference is dropped.
+WebrtcGmpVideoEncoder::~WebrtcGmpVideoEncoder()
+{
+  // We should not have been destroyed if we never closed our GMP
+  MOZ_ASSERT(!mGMP);
+}
+
+// Map a webrtc::VideoFrameType onto the corresponding GMPVideoFrameType.
+// Crashes (MOZ_CRASH) on a value outside the known enum; otherwise always
+// returns WEBRTC_VIDEO_CODEC_OK.
+static int
+WebrtcFrameTypeToGmpFrameType(webrtc::VideoFrameType aIn,
+                              GMPVideoFrameType *aOut)
+{
+  MOZ_ASSERT(aOut);
+  switch(aIn) {
+    case webrtc::kKeyFrame:
+      *aOut = kGMPKeyFrame;
+      break;
+    case webrtc::kDeltaFrame:
+      *aOut = kGMPDeltaFrame;
+      break;
+    case webrtc::kGoldenFrame:
+      *aOut = kGMPGoldenFrame;
+      break;
+    case webrtc::kAltRefFrame:
+      *aOut = kGMPAltRefFrame;
+      break;
+    case webrtc::kSkipFrame:
+      *aOut = kGMPSkipFrame;
+      break;
+    default:
+      MOZ_CRASH("Unexpected VideoFrameType");
+  }
+
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// Inverse of WebrtcFrameTypeToGmpFrameType: map a GMPVideoFrameType back to
+// webrtc::VideoFrameType. Crashes (MOZ_CRASH) on an unknown value; otherwise
+// always returns WEBRTC_VIDEO_CODEC_OK.
+static int
+GmpFrameTypeToWebrtcFrameType(GMPVideoFrameType aIn,
+                              webrtc::VideoFrameType *aOut)
+{
+  MOZ_ASSERT(aOut);
+  switch(aIn) {
+    case kGMPKeyFrame:
+      *aOut = webrtc::kKeyFrame;
+      break;
+    case kGMPDeltaFrame:
+      *aOut = webrtc::kDeltaFrame;
+      break;
+    case kGMPGoldenFrame:
+      *aOut = webrtc::kGoldenFrame;
+      break;
+    case kGMPAltRefFrame:
+      *aOut = webrtc::kAltRefFrame;
+      break;
+    case kGMPSkipFrame:
+      *aOut = webrtc::kSkipFrame;
+      break;
+    default:
+      MOZ_CRASH("Unexpected GMPVideoFrameType");
+  }
+
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// webrtc::VideoEncoder entry point (invoked on main): lazily acquires the
+// GMP service and its thread, translates |aCodecSettings| into GMPVideoCodec
+// parameters (H.264; packetization mode 1 removes the payload-size cap),
+// then dispatches async encoder creation to the GMP thread. Always returns
+// OK because init completes asynchronously; failures are reported later.
+int32_t
+WebrtcGmpVideoEncoder::InitEncode(const webrtc::VideoCodec* aCodecSettings,
+                                  int32_t aNumberOfCores,
+                                  uint32_t aMaxPayloadSize)
+{
+  if (!mMPS) {
+    mMPS = do_GetService("@mozilla.org/gecko-media-plugin-service;1");
+  }
+  MOZ_ASSERT(mMPS);
+
+  if (!mGMPThread) {
+    if (NS_WARN_IF(NS_FAILED(mMPS->GetThread(getter_AddRefs(mGMPThread))))) {
+      return WEBRTC_VIDEO_CODEC_ERROR;
+    }
+  }
+
+  // Bug XXXXXX: transfer settings from codecSettings to codec.
+  GMPVideoCodec codecParams;
+  memset(&codecParams, 0, sizeof(codecParams));
+
+  codecParams.mGMPApiVersion = 33;
+  codecParams.mStartBitrate = aCodecSettings->startBitrate;
+  codecParams.mMinBitrate = aCodecSettings->minBitrate;
+  codecParams.mMaxBitrate = aCodecSettings->maxBitrate;
+  codecParams.mMaxFramerate = aCodecSettings->maxFramerate;
+  mMaxPayloadSize = aMaxPayloadSize;
+
+  memset(&mCodecSpecificInfo, 0, sizeof(webrtc::CodecSpecificInfo));
+  mCodecSpecificInfo.codecType = webrtc::kVideoCodecH264;
+  mCodecSpecificInfo.codecSpecific.H264.packetizationMode = aCodecSettings->codecSpecific.H264.packetizationMode;
+  if (mCodecSpecificInfo.codecSpecific.H264.packetizationMode == 1) {
+    mMaxPayloadSize = 0; // No limit.
+  }
+
+  if (aCodecSettings->mode == webrtc::kScreensharing) {
+    codecParams.mMode = kGMPScreensharing;
+  } else {
+    codecParams.mMode = kGMPRealtimeVideo;
+  }
+
+  codecParams.mWidth = aCodecSettings->width;
+  codecParams.mHeight = aCodecSettings->height;
+
+  RefPtr<GmpInitDoneRunnable> initDone(new GmpInitDoneRunnable(mPCHandle));
+  mGMPThread->Dispatch(WrapRunnableNM(WebrtcGmpVideoEncoder::InitEncode_g,
+                                      RefPtr<WebrtcGmpVideoEncoder>(this),
+                                      codecParams,
+                                      aNumberOfCores,
+                                      aMaxPayloadSize,
+                                      initDone),
+                       NS_DISPATCH_NORMAL);
+
+  // Since init of the GMP encoder is a multi-step async dispatch (including
+  // dispatches to main), and since this function is invoked on main, there's
+  // no safe way to block until this init is done. If an error occurs, we'll
+  // handle it later.
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+/* static */
+// GMP-thread half of InitEncode(): requests an "h264"-tagged GMP video
+// encoder from the plugin service; InitDoneCallback finishes the setup when
+// the encoder arrives. On dispatch failure, tears down the half-initted
+// state and reports the error through |aInitDone|.
+void
+WebrtcGmpVideoEncoder::InitEncode_g(
+    const RefPtr<WebrtcGmpVideoEncoder>& aThis,
+    const GMPVideoCodec& aCodecParams,
+    int32_t aNumberOfCores,
+    uint32_t aMaxPayloadSize,
+    const RefPtr<GmpInitDoneRunnable>& aInitDone)
+{
+  nsTArray<nsCString> tags;
+  tags.AppendElement(NS_LITERAL_CSTRING("h264"));
+  UniquePtr<GetGMPVideoEncoderCallback> callback(
+    new InitDoneCallback(aThis, aInitDone, aCodecParams, aMaxPayloadSize));
+  aThis->mInitting = true;
+  nsresult rv = aThis->mMPS->GetGMPVideoEncoder(nullptr,
+                                                &tags,
+                                                NS_LITERAL_CSTRING(""),
+                                                Move(callback));
+  if (NS_WARN_IF(NS_FAILED(rv))) {
+    LOGD(("GMP Encode: GetGMPVideoEncoder failed"));
+    aThis->Close_g();
+    aInitDone->Dispatch(WEBRTC_VIDEO_CODEC_ERROR,
+                        "GMP Encode: GetGMPVideoEncoder failed");
+  }
+}
+
+// Completion handler for async encoder creation. Rejects the result if init
+// was aborted in the meantime or the service supplied no encoder/host; the
+// orphaned encoder is Close()d, which may release the last reference to
+// |this|. On success, any stale plugin is closed and the new plugin's ID is
+// cached for CodecPluginID-style queries.
+int32_t
+WebrtcGmpVideoEncoder::GmpInitDone(GMPVideoEncoderProxy* aGMP,
+                                   GMPVideoHost* aHost,
+                                   std::string* aErrorOut)
+{
+  if (!mInitting || !aGMP || !aHost) {
+    *aErrorOut = "GMP Encode: Either init was aborted, "
+                 "or init failed to supply either a GMP Encoder or GMP host.";
+    if (aGMP) {
+      // This could destroy us, since aGMP may be the last thing holding a ref
+      // Return immediately.
+      aGMP->Close();
+    }
+    return WEBRTC_VIDEO_CODEC_ERROR;
+  }
+
+  mInitting = false;
+
+  if (mGMP && mGMP != aGMP) {
+    Close_g();
+  }
+
+  mGMP = aGMP;
+  mHost = aHost;
+  mCachedPluginId = mGMP->GetPluginId();
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// Overload used by InitDoneCallback: after the basic GmpInitDone checks
+// succeed, records the negotiated codec parameters and initializes the
+// plugin encoder for the configured frame size.
+int32_t
+WebrtcGmpVideoEncoder::GmpInitDone(GMPVideoEncoderProxy* aGMP,
+                                   GMPVideoHost* aHost,
+                                   const GMPVideoCodec& aCodecParams,
+                                   uint32_t aMaxPayloadSize,
+                                   std::string* aErrorOut)
+{
+  int32_t r = GmpInitDone(aGMP, aHost, aErrorOut);
+  if (r != WEBRTC_VIDEO_CODEC_OK) {
+    // We might have been destroyed if GmpInitDone failed.
+    // Return immediately.
+    return r;
+  }
+  mCodecParams = aCodecParams;
+  return InitEncoderForSize(aCodecParams.mWidth,
+                            aCodecParams.mHeight,
+                            aErrorOut);
+}
+
+// Drop all plugin references, then Close() the plugin last: closing may
+// release the final reference to |this|, so no member may be touched after.
+void
+WebrtcGmpVideoEncoder::Close_g()
+{
+  GMPVideoEncoderProxy* gmp(mGMP);
+  mGMP = nullptr;
+  mHost = nullptr;
+  mInitting = false;
+
+  if (gmp) {
+    // Do this last, since this could cause us to be destroyed
+    gmp->Close();
+  }
+}
+
+// (Re)initialize the plugin encoder for |aWidth| x |aHeight| using the
+// cached codec parameters. On plugin failure, fills |aErrorOut| and returns
+// WEBRTC_VIDEO_CODEC_ERROR.
+int32_t
+WebrtcGmpVideoEncoder::InitEncoderForSize(unsigned short aWidth,
+                                          unsigned short aHeight,
+                                          std::string* aErrorOut)
+{
+  mCodecParams.mWidth = aWidth;
+  mCodecParams.mHeight = aHeight;
+  // Pass dummy codecSpecific data for now...
+  nsTArray<uint8_t> codecSpecific;
+
+  GMPErr err = mGMP->InitEncode(mCodecParams, codecSpecific, this, 1, mMaxPayloadSize);
+  if (err != GMPNoErr) {
+    *aErrorOut = "GMP Encode: InitEncode failed";
+    return WEBRTC_VIDEO_CODEC_ERROR;
+  }
+
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+
+// webrtc::VideoEncoder::Encode: hops synchronously to the GMP thread because
+// the input frame is borrowed (not refcounted) and must not outlive this
+// call. Always reports OK here; the real status comes from Encode_g.
+int32_t
+WebrtcGmpVideoEncoder::Encode(const webrtc::I420VideoFrame& aInputImage,
+                              const webrtc::CodecSpecificInfo* aCodecSpecificInfo,
+                              const std::vector<webrtc::VideoFrameType>* aFrameTypes)
+{
+  MOZ_ASSERT(aInputImage.width() >= 0 && aInputImage.height() >= 0);
+  // Would be really nice to avoid this sync dispatch, but it would require a
+  // copy of the frame, since it doesn't appear to actually have a refcount.
+  mGMPThread->Dispatch(
+      WrapRunnable(this,
+                   &WebrtcGmpVideoEncoder::Encode_g,
+                   &aInputImage,
+                   aCodecSpecificInfo,
+                   aFrameTypes),
+      NS_DISPATCH_SYNC);
+
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// Tear down the current plugin encoder and request a fresh one for the new
+// resolution (runs on the GMP thread); completion is signalled through
+// |aInitDone|.
+void
+WebrtcGmpVideoEncoder::RegetEncoderForResolutionChange(
+    uint32_t aWidth,
+    uint32_t aHeight,
+    const RefPtr<GmpInitDoneRunnable>& aInitDone)
+{
+  Close_g();
+
+  UniquePtr<GetGMPVideoEncoderCallback> callback(
+    new InitDoneForResolutionChangeCallback(this,
+                                            aInitDone,
+                                            aWidth,
+                                            aHeight));
+
+  // OpenH264 codec (at least) can't handle dynamic input resolution changes
+  // re-init the plugin when the resolution changes
+  // XXX allow codec to indicate it doesn't need re-init!
+  nsTArray<nsCString> tags;
+  tags.AppendElement(NS_LITERAL_CSTRING("h264"));
+  mInitting = true;
+  if (NS_WARN_IF(NS_FAILED(mMPS->GetGMPVideoEncoder(nullptr,
+                                                    &tags,
+                                                    NS_LITERAL_CSTRING(""),
+                                                    Move(callback))))) {
+    aInitDone->Dispatch(WEBRTC_VIDEO_CODEC_ERROR,
+                        "GMP Encode: GetGMPVideoEncoder failed");
+  }
+}
+
+// GMP-thread worker for Encode(): re-acquires the encoder if the input
+// resolution changed, copies the three I420 planes into a GMP frame
+// (timestamps converted from 90kHz RTP ticks to microseconds, rounding
+// down), and submits the frame with the translated frame types.
+int32_t
+WebrtcGmpVideoEncoder::Encode_g(const webrtc::I420VideoFrame* aInputImage,
+                                const webrtc::CodecSpecificInfo* aCodecSpecificInfo,
+                                const std::vector<webrtc::VideoFrameType>* aFrameTypes)
+{
+  if (!mGMP) {
+    // destroyed via Terminate(), failed to init, or just not initted yet
+    LOGD(("GMP Encode: not initted yet"));
+    return WEBRTC_VIDEO_CODEC_ERROR;
+  }
+  MOZ_ASSERT(mHost);
+
+  if (static_cast<uint32_t>(aInputImage->width()) != mCodecParams.mWidth ||
+      static_cast<uint32_t>(aInputImage->height()) != mCodecParams.mHeight) {
+    LOGD(("GMP Encode: resolution change from %ux%u to %dx%d",
+          mCodecParams.mWidth, mCodecParams.mHeight, aInputImage->width(), aInputImage->height()));
+
+    RefPtr<GmpInitDoneRunnable> initDone(new GmpInitDoneRunnable(mPCHandle));
+    RegetEncoderForResolutionChange(aInputImage->width(),
+                                    aInputImage->height(),
+                                    initDone);
+    if (!mGMP) {
+      // We needed to go async to re-get the encoder. Bail.
+      return WEBRTC_VIDEO_CODEC_ERROR;
+    }
+  }
+
+  GMPVideoFrame* ftmp = nullptr;
+  GMPErr err = mHost->CreateFrame(kGMPI420VideoFrame, &ftmp);
+  if (err != GMPNoErr) {
+    return WEBRTC_VIDEO_CODEC_ERROR;
+  }
+  GMPUniquePtr<GMPVideoi420Frame> frame(static_cast<GMPVideoi420Frame*>(ftmp));
+
+  err = frame->CreateFrame(aInputImage->allocated_size(webrtc::kYPlane),
+                           aInputImage->buffer(webrtc::kYPlane),
+                           aInputImage->allocated_size(webrtc::kUPlane),
+                           aInputImage->buffer(webrtc::kUPlane),
+                           aInputImage->allocated_size(webrtc::kVPlane),
+                           aInputImage->buffer(webrtc::kVPlane),
+                           aInputImage->width(),
+                           aInputImage->height(),
+                           aInputImage->stride(webrtc::kYPlane),
+                           aInputImage->stride(webrtc::kUPlane),
+                           aInputImage->stride(webrtc::kVPlane));
+  if (err != GMPNoErr) {
+    return err;
+  }
+  frame->SetTimestamp((aInputImage->timestamp() * 1000ll)/90); // note: rounds down!
+  //frame->SetDuration(1000000ll/30); // XXX base duration on measured current FPS - or don't bother
+
+  // Bug XXXXXX: Set codecSpecific info
+  GMPCodecSpecificInfo info;
+  memset(&info, 0, sizeof(info));
+  info.mCodecType = kGMPVideoCodecH264;
+  nsTArray<uint8_t> codecSpecificInfo;
+  codecSpecificInfo.AppendElements((uint8_t*)&info, sizeof(GMPCodecSpecificInfo));
+
+  nsTArray<GMPVideoFrameType> gmp_frame_types;
+  for (auto it = aFrameTypes->begin(); it != aFrameTypes->end(); ++it) {
+    GMPVideoFrameType ft;
+
+    int32_t ret = WebrtcFrameTypeToGmpFrameType(*it, &ft);
+    if (ret != WEBRTC_VIDEO_CODEC_OK) {
+      return ret;
+    }
+
+    gmp_frame_types.AppendElement(ft);
+  }
+
+  LOGD(("GMP Encode: %llu", (aInputImage->timestamp() * 1000ll)/90));
+  err = mGMP->Encode(Move(frame), codecSpecificInfo, gmp_frame_types);
+  if (err != GMPNoErr) {
+    return err;
+  }
+
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// Store the consumer's encoded-frame callback; guarded by mCallbackMutex
+// because Encoded() reads it from the GMP callback path.
+int32_t
+WebrtcGmpVideoEncoder::RegisterEncodeCompleteCallback(webrtc::EncodedImageCallback* aCallback)
+{
+  MutexAutoLock lock(mCallbackMutex);
+  mCallback = aCallback;
+
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// GMP-thread trampoline for ReleaseGmp(): performs the actual close.
+/* static */ void
+WebrtcGmpVideoEncoder::ReleaseGmp_g(RefPtr<WebrtcGmpVideoEncoder>& aEncoder)
+{
+  aEncoder->Close_g();
+}
+
+// Asynchronously close the plugin on the GMP thread (no-op if the GMP
+// thread was never acquired). Always reports OK.
+int32_t
+WebrtcGmpVideoEncoder::ReleaseGmp()
+{
+  LOGD(("GMP Released:"));
+  if (mGMPThread) {
+    mGMPThread->Dispatch(
+        WrapRunnableNM(&WebrtcGmpVideoEncoder::ReleaseGmp_g,
+                       RefPtr<WebrtcGmpVideoEncoder>(this)),
+        NS_DISPATCH_NORMAL);
+  }
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// Intentional no-op: packet-loss/RTT hints are not forwarded to the plugin.
+int32_t
+WebrtcGmpVideoEncoder::SetChannelParameters(uint32_t aPacketLoss, int aRTT)
+{
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// Forward a bitrate/framerate update to the plugin on the GMP thread.
+// A zero framerate is treated as 30fps. Always reports OK here; the GMP
+// result is checked in SetRates_g.
+int32_t
+WebrtcGmpVideoEncoder::SetRates(uint32_t aNewBitRate, uint32_t aFrameRate)
+{
+  MOZ_ASSERT(mGMPThread);
+  if (aFrameRate == 0) {
+    aFrameRate = 30; // Assume 30fps if we don't know the rate
+  }
+  mGMPThread->Dispatch(WrapRunnableNM(&WebrtcGmpVideoEncoder::SetRates_g,
+                                      RefPtr<WebrtcGmpVideoEncoder>(this),
+                                      aNewBitRate,
+                                      aFrameRate),
+                       NS_DISPATCH_NORMAL);
+
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// GMP-thread half of SetRates(): applies the new rates to the plugin, or
+// fails if the plugin has already been destroyed.
+/* static */ int32_t
+WebrtcGmpVideoEncoder::SetRates_g(RefPtr<WebrtcGmpVideoEncoder> aThis,
+                                  uint32_t aNewBitRate,
+                                  uint32_t aFrameRate)
+{
+  if (!aThis->mGMP) {
+    // destroyed via Terminate()
+    return WEBRTC_VIDEO_CODEC_ERROR;
+  }
+
+  GMPErr err = aThis->mGMP->SetRates(aNewBitRate, aFrameRate);
+  if (err != GMPNoErr) {
+    return WEBRTC_VIDEO_CODEC_ERROR;
+  }
+
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// GMPVideoEncoderCallback virtual functions.
+void
+WebrtcGmpVideoEncoder::Terminated()
+{
+ LOGD(("GMP Encoder Terminated: %p", (void *)this));
+
+ mGMP->Close();
+ mGMP = nullptr;
+ mHost = nullptr;
+ mInitting = false;
+ // Could now notify that it's dead
+}
+
+// GMPVideoEncoderCallback: an encoded frame arrived from the plugin.
+// Parses the plugin's length-prefixed NAL framing (per BufferType), builds
+// an RTPFragmentationHeader, and forwards one EncodedImage to webrtc.org.
+void
+WebrtcGmpVideoEncoder::Encoded(GMPVideoEncodedFrame* aEncodedFrame,
+ const nsTArray<uint8_t>& aCodecSpecificInfo)
+{
+ MutexAutoLock lock(mCallbackMutex);
+ if (mCallback) {
+ webrtc::VideoFrameType ft;
+ GmpFrameTypeToWebrtcFrameType(aEncodedFrame->FrameType(), &ft);
+ // Convert the GMP timestamp to 90 kHz RTP units, rounding up.
+ // (Decode_g performs the inverse conversion, *1000/90.)
+ uint32_t timestamp = (aEncodedFrame->TimeStamp() * 90ll + 999)/1000;
+
+ LOGD(("GMP Encoded: %llu, type %d, len %d",
+ aEncodedFrame->TimeStamp(),
+ aEncodedFrame->BufferType(),
+ aEncodedFrame->Size()));
+
+ // Right now makes one Encoded() callback per unit
+ // XXX convert to FragmentationHeader format (array of offsets and sizes plus a buffer) in
+ // combination with H264 packetization changes in webrtc/trunk code
+ uint8_t *buffer = aEncodedFrame->Buffer();
+ uint8_t *end = aEncodedFrame->Buffer() + aEncodedFrame->Size();
+ // Width in bytes of each NAL's length prefix, derived from BufferType.
+ size_t size_bytes;
+ switch (aEncodedFrame->BufferType()) {
+ case GMP_BufferSingle:
+ size_bytes = 0;
+ break;
+ case GMP_BufferLength8:
+ size_bytes = 1;
+ break;
+ case GMP_BufferLength16:
+ size_bytes = 2;
+ break;
+ case GMP_BufferLength24:
+ size_bytes = 3;
+ break;
+ case GMP_BufferLength32:
+ size_bytes = 4;
+ break;
+ default:
+ // Really that it's not in the enum
+ LOG(LogLevel::Error,
+ ("GMP plugin returned incorrect type (%d)", aEncodedFrame->BufferType()));
+ // XXX Bug 1041232 - need a better API for interfacing to the
+ // plugin so we can kill it here
+ return;
+ }
+
+ // Offset/size pairs for each NAL found in the plugin's buffer.
+ struct nal_entry {
+ uint32_t offset;
+ uint32_t size;
+ };
+ AutoTArray<nal_entry, 1> nals;
+ uint32_t size;
+ // make sure we don't read past the end of the buffer getting the size
+ while (buffer+size_bytes < end) {
+ switch (aEncodedFrame->BufferType()) {
+ case GMP_BufferSingle:
+ size = aEncodedFrame->Size();
+ break;
+ case GMP_BufferLength8:
+ size = *buffer++;
+ break;
+ case GMP_BufferLength16:
+ // presumes we can do unaligned loads
+ size = *(reinterpret_cast<uint16_t*>(buffer));
+ buffer += 2;
+ break;
+ case GMP_BufferLength24:
+ // 24-bits is a pain, since byte-order issues make things painful
+ // I'm going to define 24-bit as little-endian always; big-endian must convert
+ size = ((uint32_t) *buffer) |
+ (((uint32_t) *(buffer+1)) << 8) |
+ (((uint32_t) *(buffer+2)) << 16);
+ buffer += 3;
+ break;
+ case GMP_BufferLength32:
+ // presumes we can do unaligned loads
+ size = *(reinterpret_cast<uint32_t*>(buffer));
+ buffer += 4;
+ break;
+ default:
+ MOZ_CRASH("GMP_BufferType already handled in switch above");
+ }
+ // Reject NALs that claim to extend past the plugin's buffer.
+ if (buffer+size > end) {
+ // XXX see above - should we kill the plugin for returning extra bytes? Probably
+ LOG(LogLevel::Error,
+ ("GMP plugin returned badly formatted encoded data: end is %td bytes past buffer end",
+ buffer+size - end));
+ return;
+ }
+ // XXX optimize by making buffer an offset
+ nal_entry nal = {((uint32_t) (buffer-aEncodedFrame->Buffer())), (uint32_t) size};
+ nals.AppendElement(nal);
+ buffer += size;
+ // on last one, buffer == end normally
+ }
+ if (buffer != end) {
+ // At most 3 bytes can be left over, depending on buffertype
+ LOGD(("GMP plugin returned %td extra bytes", end - buffer));
+ }
+
+ size_t num_nals = nals.Length();
+ if (num_nals > 0) {
+ webrtc::RTPFragmentationHeader fragmentation;
+ fragmentation.VerifyAndAllocateFragmentationHeader(num_nals);
+ for (size_t i = 0; i < num_nals; i++) {
+ fragmentation.fragmentationOffset[i] = nals[i].offset;
+ fragmentation.fragmentationLength[i] = nals[i].size;
+ }
+
+ // NOTE(review): |size| here is the length of the *last* NAL parsed in
+ // the loop above, not the total frame length — confirm this is the
+ // intended _length/_size for the EncodedImage.
+ webrtc::EncodedImage unit(aEncodedFrame->Buffer(), size, size);
+ unit._frameType = ft;
+ unit._timeStamp = timestamp;
+ // Ensure we ignore this when calculating RTCP timestamps
+ unit.capture_time_ms_ = -1;
+ unit._completeFrame = true;
+
+ // TODO: Currently the OpenH264 codec does not preserve any codec
+ // specific info passed into it and just returns default values.
+ // If this changes in the future, it would be nice to get rid of
+ // mCodecSpecificInfo.
+ mCallback->Encoded(unit, &mCodecSpecificInfo, &fragmentation);
+ }
+ }
+}
+
+// Decoder.
+// Constructor: plugin pointers start null and are filled in by GmpInitDone
+// once async init completes. The PC handle (for error routing) is pulled
+// from the thread-local WebrtcGmpPCHandleSetter when built inside Gecko.
+WebrtcGmpVideoDecoder::WebrtcGmpVideoDecoder() :
+ mGMP(nullptr),
+ mInitting(false),
+ mHost(nullptr),
+ mCallbackMutex("WebrtcGmpVideoDecoder decoded callback mutex"),
+ mCallback(nullptr),
+ mCachedPluginId(0),
+ mDecoderStatus(GMPNoErr)
+{
+#ifdef MOZILLA_INTERNAL_API
+ if (mPCHandle.empty()) {
+ mPCHandle = WebrtcGmpPCHandleSetter::GetCurrentHandle();
+ }
+ MOZ_ASSERT(!mPCHandle.empty());
+#endif
+}
+
+WebrtcGmpVideoDecoder::~WebrtcGmpVideoDecoder()
+{
+ // We should not have been destroyed if we never closed our GMP
+ MOZ_ASSERT(!mGMP);
+}
+
+// Kick off async decoder init: lazily grab the GMP service and its thread,
+// then dispatch InitDecode_g. Returns OK immediately; real init failures
+// are reported later via the GmpInitDoneRunnable (to the PC on main thread).
+int32_t
+WebrtcGmpVideoDecoder::InitDecode(const webrtc::VideoCodec* aCodecSettings,
+ int32_t aNumberOfCores)
+{
+ if (!mMPS) {
+ mMPS = do_GetService("@mozilla.org/gecko-media-plugin-service;1");
+ }
+ MOZ_ASSERT(mMPS);
+
+ if (!mGMPThread) {
+ if (NS_WARN_IF(NS_FAILED(mMPS->GetThread(getter_AddRefs(mGMPThread))))) {
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ }
+
+ RefPtr<GmpInitDoneRunnable> initDone(new GmpInitDoneRunnable(mPCHandle));
+ mGMPThread->Dispatch(WrapRunnableNM(&WebrtcGmpVideoDecoder::InitDecode_g,
+ RefPtr<WebrtcGmpVideoDecoder>(this),
+ aCodecSettings,
+ aNumberOfCores,
+ initDone),
+ NS_DISPATCH_NORMAL);
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// GMP-thread half of InitDecode(): requests an "h264"-tagged GMP video
+// decoder; InitDoneCallback finishes setup via GmpInitDone(). On immediate
+// failure, closes state and reports the error through aInitDone.
+// Note: aCodecSettings/aNumberOfCores are currently unused here.
+/* static */ void
+WebrtcGmpVideoDecoder::InitDecode_g(
+ const RefPtr<WebrtcGmpVideoDecoder>& aThis,
+ const webrtc::VideoCodec* aCodecSettings,
+ int32_t aNumberOfCores,
+ const RefPtr<GmpInitDoneRunnable>& aInitDone)
+{
+ nsTArray<nsCString> tags;
+ tags.AppendElement(NS_LITERAL_CSTRING("h264"));
+ UniquePtr<GetGMPVideoDecoderCallback> callback(
+ new InitDoneCallback(aThis, aInitDone));
+ aThis->mInitting = true;
+ nsresult rv = aThis->mMPS->GetGMPVideoDecoder(nullptr,
+ &tags,
+ NS_LITERAL_CSTRING(""),
+ Move(callback));
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ LOGD(("GMP Decode: GetGMPVideoDecoder failed"));
+ aThis->Close_g();
+ aInitDone->Dispatch(WEBRTC_VIDEO_CODEC_ERROR,
+ "GMP Decode: GetGMPVideoDecoder failed.");
+ }
+}
+
+// Completion of async decoder acquisition. Validates that init wasn't
+// aborted (mInitting) and that we got both a decoder proxy and host, then
+// stores them and calls the plugin's InitDecode. On failure, fills
+// aErrorOut for the PC error path and returns WEBRTC_VIDEO_CODEC_ERROR.
+int32_t
+WebrtcGmpVideoDecoder::GmpInitDone(GMPVideoDecoderProxy* aGMP,
+ GMPVideoHost* aHost,
+ std::string* aErrorOut)
+{
+ if (!mInitting || !aGMP || !aHost) {
+ *aErrorOut = "GMP Decode: Either init was aborted, "
+ "or init failed to supply either a GMP decoder or GMP host.";
+ if (aGMP) {
+ // This could destroy us, since aGMP may be the last thing holding a ref
+ // Return immediately.
+ aGMP->Close();
+ }
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ mInitting = false;
+
+ // A re-init raced us to a different decoder; drop the old one first.
+ if (mGMP && mGMP != aGMP) {
+ Close_g();
+ }
+
+ mGMP = aGMP;
+ mHost = aHost;
+ // Cached so PluginID() works without touching mGMP cross-thread.
+ mCachedPluginId = mGMP->GetPluginId();
+ // Bug XXXXXX: transfer settings from codecSettings to codec.
+ GMPVideoCodec codec;
+ memset(&codec, 0, sizeof(codec));
+ codec.mGMPApiVersion = 33;
+
+ // XXX this is currently a hack
+ //GMPVideoCodecUnion codecSpecific;
+ //memset(&codecSpecific, 0, sizeof(codecSpecific));
+ nsTArray<uint8_t> codecSpecific;
+ nsresult rv = mGMP->InitDecode(codec, codecSpecific, this, 1);
+ if (NS_FAILED(rv)) {
+ *aErrorOut = "GMP Decode: InitDecode failed";
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// GMP-thread teardown. Null out members *before* calling Close(), because
+// Close() may release the last reference to |this| (see comment below).
+void
+WebrtcGmpVideoDecoder::Close_g()
+{
+ GMPVideoDecoderProxy* gmp(mGMP);
+ mGMP = nullptr;
+ mHost = nullptr;
+ mInitting = false;
+
+ if (gmp) {
+ // Do this last, since this could cause us to be destroyed
+ gmp->Close();
+ }
+}
+
+// Synchronously proxy a decode request onto the GMP thread (blocking the
+// calling webrtc.org decode thread). |ret| is filled in by Decode_g via
+// WrapRunnableRet before DispatchToThread returns. Must not run on main
+// thread, since the sync dispatch would deadlock/jank it.
+int32_t
+WebrtcGmpVideoDecoder::Decode(const webrtc::EncodedImage& aInputImage,
+ bool aMissingFrames,
+ const webrtc::RTPFragmentationHeader* aFragmentation,
+ const webrtc::CodecSpecificInfo* aCodecSpecificInfo,
+ int64_t aRenderTimeMs)
+{
+ int32_t ret;
+ MOZ_ASSERT(mGMPThread);
+ MOZ_ASSERT(!NS_IsMainThread());
+ // Would be really nice to avoid this sync dispatch, but it would require a
+ // copy of the frame, since it doesn't appear to actually have a refcount.
+ mozilla::SyncRunnable::DispatchToThread(mGMPThread,
+ WrapRunnableRet(&ret, this,
+ &WebrtcGmpVideoDecoder::Decode_g,
+ aInputImage,
+ aMissingFrames,
+ aFragmentation,
+ aCodecSpecificInfo,
+ aRenderTimeMs));
+
+ return ret;
+}
+
+// GMP-thread decode: copies the webrtc.org EncodedImage into a GMP encoded
+// frame (mode-1 framing: 4-byte length prefix + payload), converts frame
+// type/timestamp, and hands it to the plugin. Also surfaces any async
+// plugin error recorded in mDecoderStatus by Error().
+int32_t
+WebrtcGmpVideoDecoder::Decode_g(const webrtc::EncodedImage& aInputImage,
+ bool aMissingFrames,
+ const webrtc::RTPFragmentationHeader* aFragmentation,
+ const webrtc::CodecSpecificInfo* aCodecSpecificInfo,
+ int64_t aRenderTimeMs)
+{
+ if (!mGMP) {
+ // destroyed via Terminate(), failed to init, or just not initted yet
+ LOGD(("GMP Decode: not initted yet"));
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ MOZ_ASSERT(mHost);
+
+ if (!aInputImage._length) {
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ GMPVideoFrame* ftmp = nullptr;
+ GMPErr err = mHost->CreateFrame(kGMPEncodedVideoFrame, &ftmp);
+ if (err != GMPNoErr) {
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ GMPUniquePtr<GMPVideoEncodedFrame> frame(static_cast<GMPVideoEncodedFrame*>(ftmp));
+ err = frame->CreateEmptyFrame(aInputImage._length);
+ if (err != GMPNoErr) {
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ // XXX At this point, we only will get mode1 data (a single length and a buffer)
+ // Session_info.cc/etc code needs to change to support mode 0.
+ *(reinterpret_cast<uint32_t*>(frame->Buffer())) = frame->Size();
+
+ // XXX It'd be wonderful not to have to memcpy the encoded data!
+ // NOTE(review): only |_length != 0| is checked above; inputs shorter than
+ // 4 bytes would make frame->Size()-4 wrap (unsigned) — confirm callers
+ // guarantee a >= 4-byte payload with a 4-byte size prefix.
+ memcpy(frame->Buffer()+4, aInputImage._buffer+4, frame->Size()-4);
+
+ frame->SetEncodedWidth(aInputImage._encodedWidth);
+ frame->SetEncodedHeight(aInputImage._encodedHeight);
+ // Convert 90 kHz RTP units back to the GMP timebase (inverse of Encoded()).
+ frame->SetTimeStamp((aInputImage._timeStamp * 1000ll)/90); // rounds down
+ frame->SetCompleteFrame(aInputImage._completeFrame);
+ frame->SetBufferType(GMP_BufferLength32);
+
+ GMPVideoFrameType ft;
+ int32_t ret = WebrtcFrameTypeToGmpFrameType(aInputImage._frameType, &ft);
+ if (ret != WEBRTC_VIDEO_CODEC_OK) {
+ return ret;
+ }
+
+ // Bug XXXXXX: Set codecSpecific info
+ GMPCodecSpecificInfo info;
+ memset(&info, 0, sizeof(info));
+ info.mCodecType = kGMPVideoCodecH264;
+ info.mCodecSpecific.mH264.mSimulcastIdx = 0;
+ nsTArray<uint8_t> codecSpecificInfo;
+ codecSpecificInfo.AppendElements((uint8_t*)&info, sizeof(GMPCodecSpecificInfo));
+
+ LOGD(("GMP Decode: %llu, len %d", frame->TimeStamp(), aInputImage._length));
+ nsresult rv = mGMP->Decode(Move(frame),
+ aMissingFrames,
+ codecSpecificInfo,
+ aRenderTimeMs);
+ if (NS_FAILED(rv)) {
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ // Report (and clear) any asynchronous error the plugin posted via Error().
+ if(mDecoderStatus != GMPNoErr){
+ mDecoderStatus = GMPNoErr;
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// Store the webrtc.org callback used to deliver decoded frames; guarded by
+// mCallbackMutex against concurrent use from Decoded().
+int32_t
+WebrtcGmpVideoDecoder::RegisterDecodeCompleteCallback( webrtc::DecodedImageCallback* aCallback)
+{
+ MutexAutoLock lock(mCallbackMutex);
+ mCallback = aCallback;
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+
+// Trampoline run on the GMP thread; closes the decoder's plugin handles.
+/* static */ void
+WebrtcGmpVideoDecoder::ReleaseGmp_g(RefPtr<WebrtcGmpVideoDecoder>& aDecoder)
+{
+ aDecoder->Close_g();
+}
+
+// Asynchronously release the GMP plugin. Clears the decode callback first
+// so no further frames are delivered; the dispatched runnable keeps |this|
+// alive until Close_g() runs on mGMPThread.
+int32_t
+WebrtcGmpVideoDecoder::ReleaseGmp()
+{
+ LOGD(("GMP Released:"));
+ RegisterDecodeCompleteCallback(nullptr);
+
+ if (mGMPThread) {
+ mGMPThread->Dispatch(
+ WrapRunnableNM(&WebrtcGmpVideoDecoder::ReleaseGmp_g,
+ RefPtr<WebrtcGmpVideoDecoder>(this)),
+ NS_DISPATCH_NORMAL);
+ }
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// No-op reset: the plugin is not told to flush/reset its state.
+int32_t
+WebrtcGmpVideoDecoder::Reset()
+{
+ // XXX ?
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// Plugin-terminated notification: close and drop our plugin proxies.
+// NOTE(review): like the encoder's Terminated(), mGMP is dereferenced
+// without a null check — confirm the callback contract guarantees it.
+void
+WebrtcGmpVideoDecoder::Terminated()
+{
+ LOGD(("GMP Decoder Terminated: %p", (void *)this));
+
+ mGMP->Close();
+ mGMP = nullptr;
+ mHost = nullptr;
+ mInitting = false;
+ // Could now notify that it's dead
+}
+
+// GMPVideoDecoderCallbackProxy: a decoded i420 frame arrived from the
+// plugin. Wraps the plane buffers in a webrtc::I420VideoFrame and forwards
+// it, then destroys the GMP frame.
+void
+WebrtcGmpVideoDecoder::Decoded(GMPVideoi420Frame* aDecodedFrame)
+{
+ MutexAutoLock lock(mCallbackMutex);
+ if (mCallback) {
+ webrtc::I420VideoFrame image;
+ int ret = image.CreateFrame(aDecodedFrame->Buffer(kGMPYPlane),
+ aDecodedFrame->Buffer(kGMPUPlane),
+ aDecodedFrame->Buffer(kGMPVPlane),
+ aDecodedFrame->Width(),
+ aDecodedFrame->Height(),
+ aDecodedFrame->Stride(kGMPYPlane),
+ aDecodedFrame->Stride(kGMPUPlane),
+ aDecodedFrame->Stride(kGMPVPlane));
+ // NOTE(review): this early return skips aDecodedFrame->Destroy() below,
+ // which looks like a leaked GMP frame on CreateFrame failure — confirm.
+ if (ret != 0) {
+ return;
+ }
+ // Convert the GMP timestamp to 90 kHz RTP units (inverse of Decode_g).
+ image.set_timestamp((aDecodedFrame->Timestamp() * 90ll + 999)/1000); // round up
+ image.set_render_time_ms(0);
+
+ LOGD(("GMP Decoded: %llu", aDecodedFrame->Timestamp()));
+ mCallback->Decoded(image);
+ }
+ aDecodedFrame->Destroy();
+}
+
+}
diff --git a/media/webrtc/signaling/src/media-conduit/WebrtcGmpVideoCodec.h b/media/webrtc/signaling/src/media-conduit/WebrtcGmpVideoCodec.h
new file mode 100644
index 000000000..0c01bf53c
--- /dev/null
+++ b/media/webrtc/signaling/src/media-conduit/WebrtcGmpVideoCodec.h
@@ -0,0 +1,528 @@
+/*
+ * Copyright (c) 2012, The WebRTC project authors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * * Neither the name of Google nor the names of its contributors may
+ * be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WEBRTCGMPVIDEOCODEC_H_
+#define WEBRTCGMPVIDEOCODEC_H_
+
+#include <iostream>
+#include <queue>
+#include <string>
+
+#include "nsThreadUtils.h"
+#include "mozilla/Monitor.h"
+#include "mozilla/Mutex.h"
+
+#include "mozIGeckoMediaPluginService.h"
+#include "MediaConduitInterface.h"
+#include "AudioConduit.h"
+#include "VideoConduit.h"
+#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+
+#include "gmp-video-host.h"
+#include "GMPVideoDecoderProxy.h"
+#include "GMPVideoEncoderProxy.h"
+
+#include "PeerConnectionImpl.h"
+
+namespace mozilla {
+
+// Class that allows code on the other side of webrtc.org to tell
+// WebrtcGmpVideoEncoder/Decoder what PC they should send errors to.
+// This is necessary because webrtc.org gives us no way to plumb the handle
+// through, nor does it give us any way to inform it of an error that will
+// make it back to the PC that cares (except for errors encountered
+// synchronously in functions like InitEncode/Decode, which will not happen
+// because GMP init is async).
+// Right now, this is used in MediaPipelineFactory.
+// Scoped setter: the constructor records aPCHandle in sCurrentHandle so
+// encoder/decoder constructors running inside this scope can pick it up via
+// GetCurrentHandle(). Presumably the destructor clears/restores it — the
+// definitions live in the .cpp; confirm there.
+class WebrtcGmpPCHandleSetter
+{
+ public:
+ explicit WebrtcGmpPCHandleSetter(const std::string& aPCHandle);
+
+ ~WebrtcGmpPCHandleSetter();
+
+ static std::string GetCurrentHandle();
+
+ private:
+ // The handle visible to code constructed while a setter is alive.
+ static std::string sCurrentHandle;
+};
+
+// Main-thread runnable that reports the outcome of async GMP init back to
+// the owning PeerConnection (looked up by handle). Success is currently a
+// no-op; failure forwards the error string via OnMediaError().
+class GmpInitDoneRunnable : public Runnable
+{
+ public:
+ explicit GmpInitDoneRunnable(const std::string& aPCHandle) :
+ mResult(WEBRTC_VIDEO_CODEC_OK),
+ mPCHandle(aPCHandle)
+ {
+ }
+
+ NS_IMETHOD Run() override
+ {
+ if (mResult == WEBRTC_VIDEO_CODEC_OK) {
+ // Might be useful to notify the PeerConnection about successful init
+ // someday.
+ return NS_OK;
+ }
+
+ PeerConnectionWrapper wrapper(mPCHandle);
+ if (wrapper.impl()) {
+ wrapper.impl()->OnMediaError(mError);
+ }
+ return NS_OK;
+ }
+
+ // Record the result/error, then queue this runnable on the main thread.
+ void Dispatch(int32_t aResult, const std::string& aError = "")
+ {
+ mResult = aResult;
+ mError = aError;
+ nsCOMPtr<nsIThread> mainThread(do_GetMainThread());
+ if (mainThread) {
+ // For some reason, the compiler on CI is treating |this| as a const
+ // pointer, despite the fact that we're in a non-const function. And,
+ // interestingly enough, correcting this doesn't require a const_cast.
+ mainThread->Dispatch(do_AddRef(static_cast<nsIRunnable*>(this)),
+ NS_DISPATCH_NORMAL);
+ }
+ }
+
+ int32_t Result()
+ {
+ return mResult;
+ }
+
+ private:
+ // WEBRTC_VIDEO_CODEC_OK until Dispatch() records a failure code.
+ int32_t mResult;
+ std::string mPCHandle;
+ std::string mError;
+};
+
+// H.264 encoder backed by a GMP (Gecko Media Plugin, e.g. OpenH264).
+// Public entry points run on webrtc.org threads and proxy work onto the
+// GMP thread via the *_g static helpers; GMP callbacks arrive on the GMP
+// callback thread.
+class WebrtcGmpVideoEncoder : public GMPVideoEncoderCallbackProxy
+{
+public:
+ WebrtcGmpVideoEncoder();
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(WebrtcGmpVideoEncoder);
+
+ // Implement VideoEncoder interface, sort of.
+ // (We cannot use |Release|, since that's needed for nsRefPtr)
+ virtual uint64_t PluginID() const
+ {
+ return mCachedPluginId;
+ }
+
+ virtual int32_t InitEncode(const webrtc::VideoCodec* aCodecSettings,
+ int32_t aNumberOfCores,
+ uint32_t aMaxPayloadSize);
+
+ virtual int32_t Encode(const webrtc::I420VideoFrame& aInputImage,
+ const webrtc::CodecSpecificInfo* aCodecSpecificInfo,
+ const std::vector<webrtc::VideoFrameType>* aFrameTypes);
+
+ virtual int32_t RegisterEncodeCompleteCallback(
+ webrtc::EncodedImageCallback* aCallback);
+
+ virtual int32_t ReleaseGmp();
+
+ virtual int32_t SetChannelParameters(uint32_t aPacketLoss,
+ int aRTT);
+
+ virtual int32_t SetRates(uint32_t aNewBitRate,
+ uint32_t aFrameRate);
+
+ // GMPVideoEncoderCallback virtual functions.
+ virtual void Terminated() override;
+
+ virtual void Encoded(GMPVideoEncodedFrame* aEncodedFrame,
+ const nsTArray<uint8_t>& aCodecSpecificInfo) override;
+
+ // Plugin errors are currently swallowed here (cf. the decoder, which
+ // records them in mDecoderStatus).
+ virtual void Error(GMPErr aError) override {
+ }
+
+private:
+ virtual ~WebrtcGmpVideoEncoder();
+
+ static void InitEncode_g(const RefPtr<WebrtcGmpVideoEncoder>& aThis,
+ const GMPVideoCodec& aCodecParams,
+ int32_t aNumberOfCores,
+ uint32_t aMaxPayloadSize,
+ const RefPtr<GmpInitDoneRunnable>& aInitDone);
+ // Two overloads: the first is used for initial init (stores codec params),
+ // the second for re-init after a resolution change.
+ int32_t GmpInitDone(GMPVideoEncoderProxy* aGMP, GMPVideoHost* aHost,
+ const GMPVideoCodec& aCodecParams,
+ uint32_t aMaxPayloadSize,
+ std::string* aErrorOut);
+ int32_t GmpInitDone(GMPVideoEncoderProxy* aGMP,
+ GMPVideoHost* aHost,
+ std::string* aErrorOut);
+ int32_t InitEncoderForSize(unsigned short aWidth,
+ unsigned short aHeight,
+ std::string* aErrorOut);
+ static void ReleaseGmp_g(RefPtr<WebrtcGmpVideoEncoder>& aEncoder);
+ void Close_g();
+
+ // Receives the async result of GetGMPVideoEncoder for the initial init.
+ class InitDoneCallback : public GetGMPVideoEncoderCallback
+ {
+ public:
+ InitDoneCallback(const RefPtr<WebrtcGmpVideoEncoder>& aEncoder,
+ const RefPtr<GmpInitDoneRunnable>& aInitDone,
+ const GMPVideoCodec& aCodecParams,
+ uint32_t aMaxPayloadSize)
+ : mEncoder(aEncoder),
+ mInitDone(aInitDone),
+ mCodecParams(aCodecParams),
+ mMaxPayloadSize(aMaxPayloadSize)
+ {
+ }
+
+ virtual void Done(GMPVideoEncoderProxy* aGMP, GMPVideoHost* aHost) override
+ {
+ std::string errorOut;
+ int32_t result = mEncoder->GmpInitDone(aGMP,
+ aHost,
+ mCodecParams,
+ mMaxPayloadSize,
+ &errorOut);
+
+ mInitDone->Dispatch(result, errorOut);
+ }
+
+ private:
+ RefPtr<WebrtcGmpVideoEncoder> mEncoder;
+ RefPtr<GmpInitDoneRunnable> mInitDone;
+ GMPVideoCodec mCodecParams;
+ uint32_t mMaxPayloadSize;
+ };
+
+ int32_t Encode_g(const webrtc::I420VideoFrame* aInputImage,
+ const webrtc::CodecSpecificInfo* aCodecSpecificInfo,
+ const std::vector<webrtc::VideoFrameType>* aFrameTypes);
+ void RegetEncoderForResolutionChange(
+ uint32_t aWidth,
+ uint32_t aHeight,
+ const RefPtr<GmpInitDoneRunnable>& aInitDone);
+
+ // Receives the async result of re-acquiring the encoder after the input
+ // resolution changed, then re-inits it for the new size.
+ class InitDoneForResolutionChangeCallback : public GetGMPVideoEncoderCallback
+ {
+ public:
+ InitDoneForResolutionChangeCallback(
+ const RefPtr<WebrtcGmpVideoEncoder>& aEncoder,
+ const RefPtr<GmpInitDoneRunnable>& aInitDone,
+ uint32_t aWidth,
+ uint32_t aHeight)
+ : mEncoder(aEncoder),
+ mInitDone(aInitDone),
+ mWidth(aWidth),
+ mHeight(aHeight)
+ {
+ }
+
+ virtual void Done(GMPVideoEncoderProxy* aGMP, GMPVideoHost* aHost) override
+ {
+ std::string errorOut;
+ int32_t result = mEncoder->GmpInitDone(aGMP, aHost, &errorOut);
+ if (result != WEBRTC_VIDEO_CODEC_OK) {
+ mInitDone->Dispatch(result, errorOut);
+ return;
+ }
+
+ result = mEncoder->InitEncoderForSize(mWidth, mHeight, &errorOut);
+ mInitDone->Dispatch(result, errorOut);
+ }
+
+ private:
+ RefPtr<WebrtcGmpVideoEncoder> mEncoder;
+ RefPtr<GmpInitDoneRunnable> mInitDone;
+ uint32_t mWidth;
+ uint32_t mHeight;
+ };
+
+ static int32_t SetRates_g(RefPtr<WebrtcGmpVideoEncoder> aThis,
+ uint32_t aNewBitRate,
+ uint32_t aFrameRate);
+
+ nsCOMPtr<mozIGeckoMediaPluginService> mMPS;
+ nsCOMPtr<nsIThread> mGMPThread;
+ GMPVideoEncoderProxy* mGMP;
+ // Used to handle a race where Release() is called while init is in progress
+ bool mInitting;
+ GMPVideoHost* mHost;
+ GMPVideoCodec mCodecParams;
+ uint32_t mMaxPayloadSize;
+ webrtc::CodecSpecificInfo mCodecSpecificInfo;
+ // Protects mCallback
+ Mutex mCallbackMutex;
+ webrtc::EncodedImageCallback* mCallback;
+ // Cached at init so PluginID() needn't touch mGMP cross-thread.
+ uint64_t mCachedPluginId;
+ std::string mPCHandle;
+};
+
+
+// Basically a strong ref to a WebrtcGmpVideoEncoder, that also translates
+// from Release() to WebrtcGmpVideoEncoder::ReleaseGmp(), since we need
+// WebrtcGmpVideoEncoder::Release() for managing the refcount.
+// The webrtc.org code gets one of these, so it doesn't unilaterally delete
+// the "real" encoder.
+class WebrtcVideoEncoderProxy : public WebrtcVideoEncoder
+{
+ public:
+ WebrtcVideoEncoderProxy() :
+ mEncoderImpl(new WebrtcGmpVideoEncoder)
+ {}
+
+ virtual ~WebrtcVideoEncoderProxy()
+ {
+ // Drop the callback so the impl can't call into webrtc.org after the
+ // proxy (which webrtc.org owns) is gone.
+ RegisterEncodeCompleteCallback(nullptr);
+ }
+
+ uint64_t PluginID() const override
+ {
+ return mEncoderImpl->PluginID();
+ }
+
+ int32_t InitEncode(const webrtc::VideoCodec* aCodecSettings,
+ int32_t aNumberOfCores,
+ size_t aMaxPayloadSize) override
+ {
+ return mEncoderImpl->InitEncode(aCodecSettings,
+ aNumberOfCores,
+ aMaxPayloadSize);
+ }
+
+ int32_t Encode(
+ const webrtc::I420VideoFrame& aInputImage,
+ const webrtc::CodecSpecificInfo* aCodecSpecificInfo,
+ const std::vector<webrtc::VideoFrameType>* aFrameTypes) override
+ {
+ return mEncoderImpl->Encode(aInputImage,
+ aCodecSpecificInfo,
+ aFrameTypes);
+ }
+
+ int32_t RegisterEncodeCompleteCallback(
+ webrtc::EncodedImageCallback* aCallback) override
+ {
+ return mEncoderImpl->RegisterEncodeCompleteCallback(aCallback);
+ }
+
+ // Maps webrtc.org's Release() onto ReleaseGmp(), leaving the impl's
+ // refcounting Release() untouched.
+ int32_t Release() override
+ {
+ return mEncoderImpl->ReleaseGmp();
+ }
+
+ // NOTE(review): narrows the int64_t RTT to the impl's int parameter.
+ int32_t SetChannelParameters(uint32_t aPacketLoss,
+ int64_t aRTT) override
+ {
+ return mEncoderImpl->SetChannelParameters(aPacketLoss, aRTT);
+ }
+
+ int32_t SetRates(uint32_t aNewBitRate,
+ uint32_t aFrameRate) override
+ {
+ return mEncoderImpl->SetRates(aNewBitRate, aFrameRate);
+ }
+
+ private:
+ RefPtr<WebrtcGmpVideoEncoder> mEncoderImpl;
+};
+
+// H.264 decoder backed by a GMP plugin; mirrors WebrtcGmpVideoEncoder's
+// threading model (public entry points proxy onto the GMP thread, plugin
+// callbacks arrive on the GMP callback thread).
+class WebrtcGmpVideoDecoder : public GMPVideoDecoderCallbackProxy
+{
+public:
+ WebrtcGmpVideoDecoder();
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(WebrtcGmpVideoDecoder);
+
+ // Implement VideoEncoder interface, sort of.
+ // (We cannot use |Release|, since that's needed for nsRefPtr)
+ virtual uint64_t PluginID() const
+ {
+ return mCachedPluginId;
+ }
+
+ virtual int32_t InitDecode(const webrtc::VideoCodec* aCodecSettings,
+ int32_t aNumberOfCores);
+ virtual int32_t Decode(const webrtc::EncodedImage& aInputImage,
+ bool aMissingFrames,
+ const webrtc::RTPFragmentationHeader* aFragmentation,
+ const webrtc::CodecSpecificInfo* aCodecSpecificInfo,
+ int64_t aRenderTimeMs);
+ virtual int32_t RegisterDecodeCompleteCallback(webrtc::DecodedImageCallback* aCallback);
+
+ virtual int32_t ReleaseGmp();
+
+ virtual int32_t Reset();
+
+ // GMPVideoDecoderCallbackProxy
+ virtual void Terminated() override;
+
+ virtual void Decoded(GMPVideoi420Frame* aDecodedFrame) override;
+
+ // Reference-frame callbacks are not expected from the plugins we use;
+ // crash loudly if they ever arrive.
+ virtual void ReceivedDecodedReferenceFrame(const uint64_t aPictureId) override {
+ MOZ_CRASH();
+ }
+
+ virtual void ReceivedDecodedFrame(const uint64_t aPictureId) override {
+ MOZ_CRASH();
+ }
+
+ virtual void InputDataExhausted() override {
+ }
+
+ virtual void DrainComplete() override {
+ }
+
+ virtual void ResetComplete() override {
+ }
+
+ // Async plugin errors are recorded and surfaced by the next Decode_g.
+ virtual void Error(GMPErr aError) override {
+ mDecoderStatus = aError;
+ }
+
+private:
+ virtual ~WebrtcGmpVideoDecoder();
+
+ static void InitDecode_g(
+ const RefPtr<WebrtcGmpVideoDecoder>& aThis,
+ const webrtc::VideoCodec* aCodecSettings,
+ int32_t aNumberOfCores,
+ const RefPtr<GmpInitDoneRunnable>& aInitDone);
+ int32_t GmpInitDone(GMPVideoDecoderProxy* aGMP,
+ GMPVideoHost* aHost,
+ std::string* aErrorOut);
+ static void ReleaseGmp_g(RefPtr<WebrtcGmpVideoDecoder>& aDecoder);
+ void Close_g();
+
+ // Receives the async result of GetGMPVideoDecoder.
+ class InitDoneCallback : public GetGMPVideoDecoderCallback
+ {
+ public:
+ explicit InitDoneCallback(const RefPtr<WebrtcGmpVideoDecoder>& aDecoder,
+ const RefPtr<GmpInitDoneRunnable>& aInitDone)
+ : mDecoder(aDecoder),
+ mInitDone(aInitDone)
+ {
+ }
+
+ // NOTE(review): missing |override| here, unlike the encoder's
+ // InitDoneCallback::Done — worth adding for compile-time checking.
+ virtual void Done(GMPVideoDecoderProxy* aGMP, GMPVideoHost* aHost)
+ {
+ std::string errorOut;
+ int32_t result = mDecoder->GmpInitDone(aGMP, aHost, &errorOut);
+
+ mInitDone->Dispatch(result, errorOut);
+ }
+
+ private:
+ RefPtr<WebrtcGmpVideoDecoder> mDecoder;
+ RefPtr<GmpInitDoneRunnable> mInitDone;
+ };
+
+ virtual int32_t Decode_g(const webrtc::EncodedImage& aInputImage,
+ bool aMissingFrames,
+ const webrtc::RTPFragmentationHeader* aFragmentation,
+ const webrtc::CodecSpecificInfo* aCodecSpecificInfo,
+ int64_t aRenderTimeMs);
+
+ nsCOMPtr<mozIGeckoMediaPluginService> mMPS;
+ nsCOMPtr<nsIThread> mGMPThread;
+ GMPVideoDecoderProxy* mGMP; // Addref is held for us
+ // Used to handle a race where Release() is called while init is in progress
+ bool mInitting;
+ GMPVideoHost* mHost;
+ // Protects mCallback
+ Mutex mCallbackMutex;
+ webrtc::DecodedImageCallback* mCallback;
+ // Atomic: read by PluginID() on arbitrary threads, written on GMP thread.
+ Atomic<uint64_t> mCachedPluginId;
+ // Last error the plugin posted via Error(); cleared when reported.
+ GMPErr mDecoderStatus;
+ std::string mPCHandle;
+};
+
+// Basically a strong ref to a WebrtcGmpVideoDecoder, that also translates
+// from Release() to WebrtcGmpVideoDecoder::ReleaseGmp(), since we need
+// WebrtcGmpVideoDecoder::Release() for managing the refcount.
+// The webrtc.org code gets one of these, so it doesn't unilaterally delete
+// the "real" encoder.
+class WebrtcVideoDecoderProxy : public WebrtcVideoDecoder
+{
+ public:
+ WebrtcVideoDecoderProxy() :
+ mDecoderImpl(new WebrtcGmpVideoDecoder)
+ {}
+
+ virtual ~WebrtcVideoDecoderProxy()
+ {
+ // Drop the callback so the impl can't call into webrtc.org after the
+ // proxy (which webrtc.org owns) is gone.
+ RegisterDecodeCompleteCallback(nullptr);
+ }
+
+ uint64_t PluginID() const override
+ {
+ return mDecoderImpl->PluginID();
+ }
+
+ int32_t InitDecode(const webrtc::VideoCodec* aCodecSettings,
+ int32_t aNumberOfCores) override
+ {
+ return mDecoderImpl->InitDecode(aCodecSettings, aNumberOfCores);
+ }
+
+ int32_t Decode(
+ const webrtc::EncodedImage& aInputImage,
+ bool aMissingFrames,
+ const webrtc::RTPFragmentationHeader* aFragmentation,
+ const webrtc::CodecSpecificInfo* aCodecSpecificInfo,
+ int64_t aRenderTimeMs) override
+ {
+ return mDecoderImpl->Decode(aInputImage,
+ aMissingFrames,
+ aFragmentation,
+ aCodecSpecificInfo,
+ aRenderTimeMs);
+ }
+
+ int32_t RegisterDecodeCompleteCallback(
+ webrtc::DecodedImageCallback* aCallback) override
+ {
+ return mDecoderImpl->RegisterDecodeCompleteCallback(aCallback);
+ }
+
+ // Maps webrtc.org's Release() onto ReleaseGmp(), leaving the impl's
+ // refcounting Release() untouched.
+ int32_t Release() override
+ {
+ return mDecoderImpl->ReleaseGmp();
+ }
+
+ int32_t Reset() override
+ {
+ return mDecoderImpl->Reset();
+ }
+
+ private:
+ RefPtr<WebrtcGmpVideoDecoder> mDecoderImpl;
+};
+
+}
+
+#endif
diff --git a/media/webrtc/signaling/src/media-conduit/WebrtcMediaCodecVP8VideoCodec.cpp b/media/webrtc/signaling/src/media-conduit/WebrtcMediaCodecVP8VideoCodec.cpp
new file mode 100644
index 000000000..27b99d5ed
--- /dev/null
+++ b/media/webrtc/signaling/src/media-conduit/WebrtcMediaCodecVP8VideoCodec.cpp
@@ -0,0 +1,1004 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <cstdio>
+#include <iostream>
+#include <queue>
+
+#include "CSFLog.h"
+#include "nspr.h"
+
+#include "MediaCodec.h"
+#include "WebrtcMediaCodecVP8VideoCodec.h"
+#include "AndroidJNIWrapper.h"
+#include "mozilla/ArrayUtils.h"
+#include "nsThreadUtils.h"
+#include "mozilla/Monitor.h"
+#include "runnable_utils.h"
+
+#include "AudioConduit.h"
+#include "VideoConduit.h"
+#include "libyuv/convert_from.h"
+#include "libyuv/convert.h"
+#include "libyuv/row.h"
+
+#include <webrtc/common_video/libyuv/include/webrtc_libyuv.h>
+
+using namespace mozilla;
+using namespace mozilla::java::sdk;
+
+static const int32_t DECODER_TIMEOUT = 10 * PR_USEC_PER_MSEC; // 10ms
+static const char MEDIACODEC_VIDEO_MIME_VP8[] = "video/x-vnd.on2.vp8";
+
+namespace mozilla {
+
+static const char* logTag ="WebrtcMediaCodecVP8VideoCodec";
+
+static MediaCodec::LocalRef CreateDecoder(const char* aMimeType)
+{
+ if (!aMimeType) {
+ return nullptr;
+ }
+
+ MediaCodec::LocalRef codec;
+ MediaCodec::CreateDecoderByType(aMimeType, &codec);
+ return codec;
+}
+
+static MediaCodec::LocalRef CreateEncoder(const char* aMimeType)
+{
+ if (!aMimeType) {
+ return nullptr;
+ }
+
+ MediaCodec::LocalRef codec;
+ MediaCodec::CreateEncoderByType(aMimeType, &codec);
+ return codec;
+}
+
+static void
+ShutdownThread(nsCOMPtr<nsIThread>& aThread)
+{
+ aThread->Shutdown();
+}
+
+// Base runnable class to repeatedly pull MediaCodec output buffers in a separate thread.
+// How to use:
+// - implement DrainOutput() to get output. Remember to return false to tell
+//   the drain not to pop the input queue.
+// - call QueueInput() to schedule a run to drain output. The input, aFrame,
+//   should contain corresponding info such as image size and timestamps for
+//   the DrainOutput() implementation to construct data needed by encoded/decoded
+//   callbacks.
+class MediaCodecOutputDrain : public Runnable
+{
+public:
+ void Start() {
+ MonitorAutoLock lock(mMonitor);
+ if (mThread == nullptr) {
+ NS_NewNamedThread("OutputDrain", getter_AddRefs(mThread));
+ }
+ mEnding = false;
+ mThread->Dispatch(this, NS_DISPATCH_NORMAL);
+ }
+
+ void Stop() {
+ MonitorAutoLock lock(mMonitor);
+ mEnding = true;
+ lock.NotifyAll(); // In case Run() is waiting.
+
+ if (mThread != nullptr) {
+ MonitorAutoUnlock unlock(mMonitor);
+ NS_DispatchToMainThread(
+ WrapRunnableNM<decltype(&ShutdownThread),
+ nsCOMPtr<nsIThread> >(&ShutdownThread, mThread));
+ mThread = nullptr;
+ }
+ }
+
+ void QueueInput(const EncodedFrame& aFrame)
+ {
+ MonitorAutoLock lock(mMonitor);
+
+ MOZ_ASSERT(mThread);
+
+ mInputFrames.push(aFrame);
+ // Notify Run() about queued input and it can start working.
+ lock.NotifyAll();
+ }
+
+ NS_IMETHOD Run() override
+ {
+ MOZ_ASSERT(mThread);
+
+ MonitorAutoLock lock(mMonitor);
+ while (true) {
+ if (mInputFrames.empty()) {
+ // Wait for new input.
+ lock.Wait();
+ }
+
+ if (mEnding) {
+ // Stop draining.
+ break;
+ }
+
+ MOZ_ASSERT(!mInputFrames.empty());
+ {
+ // Release monitor while draining because it's blocking.
+ MonitorAutoUnlock unlock(mMonitor);
+ DrainOutput();
+ }
+ }
+
+ return NS_OK;
+ }
+
+protected:
+ MediaCodecOutputDrain()
+ : mMonitor("MediaCodecOutputDrain monitor")
+ , mEnding(false)
+ {}
+
+ // Drain output buffer for input frame queue mInputFrames.
+ // mInputFrames contains info such as size and time of the input frames.
+ // We have to give a queue to handle encoder frame skips - we can input 10
+  // frames and get one back. NOTE: any access of mInputFrames MUST be preceded
+  // by locking mMonitor!
+
+ // Blocks waiting for decoded buffers, but for a limited period because
+ // we need to check for shutdown.
+ virtual bool DrainOutput() = 0;
+
+protected:
+ // This monitor protects all things below it, and is also used to
+ // wait/notify queued input.
+ Monitor mMonitor;
+ std::queue<EncodedFrame> mInputFrames;
+
+private:
+ // also protected by mMonitor
+ nsCOMPtr<nsIThread> mThread;
+ bool mEnding;
+};
+
+class WebrtcAndroidMediaCodec {
+public:
+ WebrtcAndroidMediaCodec()
+ : mEncoderCallback(nullptr)
+ , mDecoderCallback(nullptr)
+ , isStarted(false)
+ , mEnding(false) {
+ CSFLogDebug(logTag, "%s ", __FUNCTION__);
+ }
+
+ nsresult Configure(uint32_t width,
+ uint32_t height,
+ const jobject aSurface,
+ uint32_t flags,
+ const char* mime,
+ bool encoder) {
+ CSFLogDebug(logTag, "%s ", __FUNCTION__);
+ nsresult res = NS_OK;
+
+ if (!mCoder) {
+ mWidth = width;
+ mHeight = height;
+
+ MediaFormat::LocalRef format;
+
+ res = MediaFormat::CreateVideoFormat(nsCString(mime),
+ mWidth,
+ mHeight,
+ &format);
+
+ if (NS_FAILED(res)) {
+ CSFLogDebug(logTag, "WebrtcAndroidMediaCodec::%s, CreateVideoFormat failed err = %d", __FUNCTION__, (int)res);
+ return NS_ERROR_FAILURE;
+ }
+
+ if (encoder) {
+ mCoder = CreateEncoder(mime);
+
+ if (NS_FAILED(res)) {
+ CSFLogDebug(logTag, "WebrtcAndroidMediaCodec::%s, CreateEncoderByType failed err = %d", __FUNCTION__, (int)res);
+ return NS_ERROR_FAILURE;
+ }
+
+ res = format->SetInteger(nsCString("bitrate"), 1000*300);
+ res = format->SetInteger(nsCString("bitrate-mode"), 2);
+ res = format->SetInteger(nsCString("color-format"), 21);
+ res = format->SetInteger(nsCString("frame-rate"), 30);
+ res = format->SetInteger(nsCString("i-frame-interval"), 100);
+
+ } else {
+ mCoder = CreateDecoder(mime);
+ if (NS_FAILED(res)) {
+ CSFLogDebug(logTag, "WebrtcAndroidMediaCodec::%s, CreateDecoderByType failed err = %d", __FUNCTION__, (int)res);
+ return NS_ERROR_FAILURE;
+ }
+ }
+ res = mCoder->Configure(format, nullptr, nullptr, flags);
+ if (NS_FAILED(res)) {
+ CSFLogDebug(logTag, "WebrtcAndroidMediaCodec::%s, err = %d", __FUNCTION__, (int)res);
+ }
+ }
+
+ return res;
+ }
+
+ nsresult Start() {
+ CSFLogDebug(logTag, "%s ", __FUNCTION__);
+
+ if (!mCoder) {
+ return NS_ERROR_FAILURE;
+ }
+
+ mEnding = false;
+
+ nsresult res;
+ res = mCoder->Start();
+ if (NS_FAILED(res)) {
+ CSFLogDebug(logTag, "WebrtcAndroidMediaCodec::%s, mCoder->start() return err = %d",
+ __FUNCTION__, (int)res);
+ return res;
+ }
+ isStarted = true;
+ return NS_OK;
+ }
+
+ nsresult Stop() {
+ CSFLogDebug(logTag, "%s ", __FUNCTION__);
+ mEnding = true;
+
+ if (mOutputDrain != nullptr) {
+ mOutputDrain->Stop();
+ mOutputDrain = nullptr;
+ }
+
+ mCoder->Stop();
+ mCoder->Release();
+ isStarted = false;
+ return NS_OK;
+ }
+
+ void GenerateVideoFrame(
+ size_t width, size_t height, uint32_t timeStamp,
+ void* decoded,
+ webrtc::I420VideoFrame* videoFrame, int color_format) {
+
+ CSFLogDebug(logTag, "%s ", __FUNCTION__);
+
+ // TODO: eliminate extra pixel copy/color conversion
+ size_t widthUV = (width + 1) / 2;
+ if (videoFrame->CreateEmptyFrame(width, height, width, widthUV, widthUV)) {
+ return;
+ }
+
+ uint8_t* src_nv12 = static_cast<uint8_t *>(decoded);
+ int src_nv12_y_size = width * height;
+
+ uint8_t* dstY = videoFrame->buffer(webrtc::kYPlane);
+ uint8_t* dstU = videoFrame->buffer(webrtc::kUPlane);
+ uint8_t* dstV = videoFrame->buffer(webrtc::kVPlane);
+
+ libyuv::NV12ToI420(src_nv12, width,
+ src_nv12 + src_nv12_y_size, (width + 1) & ~1,
+ dstY, width,
+ dstU, (width + 1) / 2,
+ dstV,
+ (width + 1) / 2,
+ width, height);
+
+ videoFrame->set_timestamp(timeStamp);
+ }
+
+ int32_t
+ FeedMediaCodecInput(
+ const webrtc::EncodedImage& inputImage,
+ int64_t renderTimeMs) {
+
+#ifdef WEBRTC_MEDIACODEC_DEBUG
+ uint32_t time = PR_IntervalNow();
+ CSFLogDebug(logTag, "%s ", __FUNCTION__);
+#endif
+
+ int inputIndex = DequeueInputBuffer(DECODER_TIMEOUT);
+ if (inputIndex == -1) {
+ CSFLogError(logTag, "%s equeue input buffer failed", __FUNCTION__);
+ return inputIndex;
+ }
+
+#ifdef WEBRTC_MEDIACODEC_DEBUG
+ CSFLogDebug(logTag, "%s dequeue input buffer took %u ms", __FUNCTION__, PR_IntervalToMilliseconds(PR_IntervalNow()-time));
+ time = PR_IntervalNow();
+#endif
+
+ size_t size = inputImage._length;
+
+ JNIEnv* env = jsjni_GetJNIForThread();
+ jobject buffer = env->GetObjectArrayElement(mInputBuffers, inputIndex);
+ void* directBuffer = env->GetDirectBufferAddress(buffer);
+
+ PodCopy((uint8_t*)directBuffer, inputImage._buffer, size);
+
+ if (inputIndex >= 0) {
+ CSFLogError(logTag, "%s queue input buffer inputIndex = %d", __FUNCTION__, inputIndex);
+ QueueInputBuffer(inputIndex, 0, size, renderTimeMs, 0);
+
+ {
+ if (mOutputDrain == nullptr) {
+ mOutputDrain = new OutputDrain(this);
+ mOutputDrain->Start();
+ }
+ EncodedFrame frame;
+ frame.width_ = mWidth;
+ frame.height_ = mHeight;
+ frame.timeStamp_ = inputImage._timeStamp;
+ frame.decode_timestamp_ = renderTimeMs;
+ mOutputDrain->QueueInput(frame);
+ }
+ env->DeleteLocalRef(buffer);
+ }
+
+ return inputIndex;
+ }
+
+ nsresult
+ DrainOutput(std::queue<EncodedFrame>& aInputFrames, Monitor& aMonitor) {
+ MOZ_ASSERT(mCoder != nullptr);
+ if (mCoder == nullptr) {
+ return NS_ERROR_FAILURE;
+ }
+
+#ifdef WEBRTC_MEDIACODEC_DEBUG
+ uint32_t time = PR_IntervalNow();
+#endif
+ nsresult res;
+ BufferInfo::LocalRef bufferInfo;
+ res = BufferInfo::New(&bufferInfo);
+ if (NS_FAILED(res)) {
+ CSFLogDebug(logTag, "WebrtcAndroidMediaCodec::%s, BufferInfo::New return err = %d",
+ __FUNCTION__, (int)res);
+ return res;
+ }
+ int32_t outputIndex = DequeueOutputBuffer(bufferInfo);
+
+ if (outputIndex == MediaCodec::INFO_TRY_AGAIN_LATER) {
+ // Not an error: output not available yet. Try later.
+ CSFLogDebug(logTag, "%s dequeue output buffer try again:%d", __FUNCTION__, outputIndex);
+ } else if (outputIndex == MediaCodec::INFO_OUTPUT_FORMAT_CHANGED) {
+ // handle format change
+ CSFLogDebug(logTag, "%s dequeue output buffer format changed:%d", __FUNCTION__, outputIndex);
+ } else if (outputIndex == MediaCodec::INFO_OUTPUT_BUFFERS_CHANGED) {
+ CSFLogDebug(logTag, "%s dequeue output buffer changed:%d", __FUNCTION__, outputIndex);
+ GetOutputBuffers();
+ } else if (outputIndex < 0) {
+ CSFLogDebug(logTag, "%s dequeue output buffer unknow error:%d", __FUNCTION__, outputIndex);
+ MonitorAutoLock lock(aMonitor);
+ aInputFrames.pop();
+ } else {
+#ifdef WEBRTC_MEDIACODEC_DEBUG
+ CSFLogDebug(logTag, "%s dequeue output buffer# return status is %d took %u ms", __FUNCTION__, outputIndex, PR_IntervalToMilliseconds(PR_IntervalNow()-time));
+#endif
+ EncodedFrame frame;
+ {
+ MonitorAutoLock lock(aMonitor);
+ frame = aInputFrames.front();
+ aInputFrames.pop();
+ }
+
+ if (mEnding) {
+ ReleaseOutputBuffer(outputIndex, false);
+ return NS_OK;
+ }
+
+ JNIEnv* env = jsjni_GetJNIForThread();
+ jobject buffer = env->GetObjectArrayElement(mOutputBuffers, outputIndex);
+ if (buffer) {
+ // The buffer will be null on Android L if we are decoding to a Surface
+ void* directBuffer = env->GetDirectBufferAddress(buffer);
+
+ int color_format = 0;
+
+ CSFLogDebug(logTag, "%s generate video frame, width = %d, height = %d, timeStamp_ = %d", __FUNCTION__, frame.width_, frame.height_, frame.timeStamp_);
+ GenerateVideoFrame(frame.width_, frame.height_, frame.timeStamp_, directBuffer, &mVideoFrame, color_format);
+ mDecoderCallback->Decoded(mVideoFrame);
+
+ ReleaseOutputBuffer(outputIndex, false);
+ env->DeleteLocalRef(buffer);
+ }
+ }
+ return NS_OK;
+ }
+
+ int32_t DequeueInputBuffer(int64_t time) {
+ nsresult res;
+ int32_t inputIndex;
+ res = mCoder->DequeueInputBuffer(time, &inputIndex);
+
+ if (NS_FAILED(res)) {
+ CSFLogDebug(logTag, "WebrtcAndroidMediaCodec::%s, mCoder->DequeueInputBuffer() return err = %d",
+ __FUNCTION__, (int)res);
+ return -1;
+ }
+ return inputIndex;
+ }
+
+ void QueueInputBuffer(int32_t inputIndex, int32_t offset, size_t size, int64_t renderTimes, int32_t flags) {
+ nsresult res = NS_OK;
+ res = mCoder->QueueInputBuffer(inputIndex, offset, size, renderTimes, flags);
+
+ if (NS_FAILED(res)) {
+ CSFLogDebug(logTag, "WebrtcAndroidMediaCodec::%s, mCoder->QueueInputBuffer() return err = %d",
+ __FUNCTION__, (int)res);
+ }
+ }
+
+ int32_t DequeueOutputBuffer(BufferInfo::Param aInfo) {
+ nsresult res;
+
+ int32_t outputStatus;
+ res = mCoder->DequeueOutputBuffer(aInfo, DECODER_TIMEOUT, &outputStatus);
+
+ if (NS_FAILED(res)) {
+ CSFLogDebug(logTag, "WebrtcAndroidMediaCodec::%s, mCoder->DequeueOutputBuffer() return err = %d",
+ __FUNCTION__, (int)res);
+ return -1;
+ }
+
+ return outputStatus;
+ }
+
+ void ReleaseOutputBuffer(int32_t index, bool flag) {
+ mCoder->ReleaseOutputBuffer(index, flag);
+ }
+
+ jobjectArray GetInputBuffers() {
+ JNIEnv* env = jsjni_GetJNIForThread();
+
+ if (mInputBuffers) {
+ env->DeleteGlobalRef(mInputBuffers);
+ }
+
+ nsresult res;
+ jni::ObjectArray::LocalRef inputBuffers;
+ res = mCoder->GetInputBuffers(&inputBuffers);
+ mInputBuffers = (jobjectArray) env->NewGlobalRef(inputBuffers.Get());
+ if (NS_FAILED(res)) {
+ CSFLogDebug(logTag, "WebrtcAndroidMediaCodec::%s, GetInputBuffers return err = %d",
+ __FUNCTION__, (int)res);
+ return nullptr;
+ }
+
+ return mInputBuffers;
+ }
+
+ jobjectArray GetOutputBuffers() {
+ JNIEnv* env = jsjni_GetJNIForThread();
+
+ if (mOutputBuffers) {
+ env->DeleteGlobalRef(mOutputBuffers);
+ }
+
+ nsresult res;
+ jni::ObjectArray::LocalRef outputBuffers;
+ res = mCoder->GetOutputBuffers(&outputBuffers);
+ mOutputBuffers = (jobjectArray) env->NewGlobalRef(outputBuffers.Get());
+ if (NS_FAILED(res)) {
+ CSFLogDebug(logTag, "WebrtcAndroidMediaCodec::%s, GetOutputBuffers return err = %d",
+ __FUNCTION__, (int)res);
+ return nullptr;
+ }
+
+ return mOutputBuffers;
+ }
+
+ void SetDecoderCallback(webrtc::DecodedImageCallback* aCallback) {
+ mDecoderCallback = aCallback;
+ }
+
+ void SetEncoderCallback(webrtc::EncodedImageCallback* aCallback) {
+ mEncoderCallback = aCallback;
+ }
+
+protected:
+ virtual ~WebrtcAndroidMediaCodec() {
+ }
+
+private:
+class OutputDrain : public MediaCodecOutputDrain
+ {
+ public:
+ OutputDrain(WebrtcAndroidMediaCodec* aMediaCodec)
+ : MediaCodecOutputDrain()
+ , mMediaCodec(aMediaCodec)
+ {}
+
+ protected:
+ virtual bool DrainOutput() override
+ {
+ return (mMediaCodec->DrainOutput(mInputFrames, mMonitor) == NS_OK);
+ }
+
+ private:
+ WebrtcAndroidMediaCodec* mMediaCodec;
+ };
+
+ friend class WebrtcMediaCodecVP8VideoEncoder;
+ friend class WebrtcMediaCodecVP8VideoDecoder;
+
+ MediaCodec::GlobalRef mCoder;
+ webrtc::EncodedImageCallback* mEncoderCallback;
+ webrtc::DecodedImageCallback* mDecoderCallback;
+ webrtc::I420VideoFrame mVideoFrame;
+
+ jobjectArray mInputBuffers;
+ jobjectArray mOutputBuffers;
+
+ RefPtr<OutputDrain> mOutputDrain;
+ uint32_t mWidth;
+ uint32_t mHeight;
+ bool isStarted;
+ bool mEnding;
+
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(WebrtcAndroidMediaCodec)
+};
+
+static bool I420toNV12(uint8_t* dstY, uint16_t* dstUV, const webrtc::I420VideoFrame& inputImage) {
+ uint8_t* buffer = dstY;
+ uint8_t* dst_y = buffer;
+ int dst_stride_y = inputImage.stride(webrtc::kYPlane);
+ uint8_t* dst_uv = buffer + inputImage.stride(webrtc::kYPlane) *
+ inputImage.height();
+ int dst_stride_uv = inputImage.stride(webrtc::kUPlane) * 2;
+
+ // Why NV12? Because COLOR_FORMAT_YUV420_SEMIPLANAR. Most hardware is NV12-friendly.
+ bool converted = !libyuv::I420ToNV12(inputImage.buffer(webrtc::kYPlane),
+ inputImage.stride(webrtc::kYPlane),
+ inputImage.buffer(webrtc::kUPlane),
+ inputImage.stride(webrtc::kUPlane),
+ inputImage.buffer(webrtc::kVPlane),
+ inputImage.stride(webrtc::kVPlane),
+ dst_y,
+ dst_stride_y,
+ dst_uv,
+ dst_stride_uv,
+ inputImage.width(),
+ inputImage.height());
+ return converted;
+}
+
+// Encoder.
+WebrtcMediaCodecVP8VideoEncoder::WebrtcMediaCodecVP8VideoEncoder()
+ : mTimestamp(0)
+ , mCallback(nullptr)
+ , mMediaCodecEncoder(nullptr) {
+ CSFLogDebug(logTag, "%s ", __FUNCTION__);
+
+ memset(&mEncodedImage, 0, sizeof(mEncodedImage));
+}
+
+bool WebrtcMediaCodecVP8VideoEncoder::ResetInputBuffers() {
+ mInputBuffers = mMediaCodecEncoder->GetInputBuffers();
+
+ if (!mInputBuffers)
+ return false;
+
+ return true;
+}
+
+bool WebrtcMediaCodecVP8VideoEncoder::ResetOutputBuffers() {
+ mOutputBuffers = mMediaCodecEncoder->GetOutputBuffers();
+
+ if (!mOutputBuffers)
+ return false;
+
+ return true;
+}
+
+int32_t
+WebrtcMediaCodecVP8VideoEncoder::VerifyAndAllocate(const uint32_t minimumSize)
+{
+ if(minimumSize > mEncodedImage._size)
+ {
+ // create buffer of sufficient size
+ uint8_t* newBuffer = new uint8_t[minimumSize];
+ if (newBuffer == nullptr) {
+ return -1;
+ }
+ if(mEncodedImage._buffer) {
+ // copy old data
+ memcpy(newBuffer, mEncodedImage._buffer, mEncodedImage._size);
+ delete [] mEncodedImage._buffer;
+ }
+ mEncodedImage._buffer = newBuffer;
+ mEncodedImage._size = minimumSize;
+ }
+ return 0;
+}
+
+int32_t WebrtcMediaCodecVP8VideoEncoder::InitEncode(
+ const webrtc::VideoCodec* codecSettings,
+ int32_t numberOfCores,
+ size_t maxPayloadSize) {
+ mMaxPayloadSize = maxPayloadSize;
+ CSFLogDebug(logTag, "%s, w = %d, h = %d", __FUNCTION__, codecSettings->width, codecSettings->height);
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t WebrtcMediaCodecVP8VideoEncoder::Encode(
+ const webrtc::I420VideoFrame& inputImage,
+ const webrtc::CodecSpecificInfo* codecSpecificInfo,
+ const std::vector<webrtc::VideoFrameType>* frame_types) {
+ CSFLogDebug(logTag, "%s, w = %d, h = %d", __FUNCTION__, inputImage.width(), inputImage.height());
+
+ if (!mMediaCodecEncoder) {
+ mMediaCodecEncoder = new WebrtcAndroidMediaCodec();
+ }
+
+ if (!mMediaCodecEncoder->isStarted) {
+ if (inputImage.width() == 0 || inputImage.height() == 0) {
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ } else {
+ mFrameWidth = inputImage.width();
+ mFrameHeight = inputImage.height();
+ }
+
+ mMediaCodecEncoder->SetEncoderCallback(mCallback);
+ nsresult res = mMediaCodecEncoder->Configure(mFrameWidth, mFrameHeight, nullptr, MediaCodec::CONFIGURE_FLAG_ENCODE, MEDIACODEC_VIDEO_MIME_VP8, true /* encoder */);
+
+ if (res != NS_OK) {
+ CSFLogDebug(logTag, "%s, encoder configure return err = %d",
+ __FUNCTION__, (int)res);
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ res = mMediaCodecEncoder->Start();
+
+ if (NS_FAILED(res)) {
+ mMediaCodecEncoder->isStarted = false;
+ CSFLogDebug(logTag, "%s start encoder. err = %d", __FUNCTION__, (int)res);
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ bool retBool = ResetInputBuffers();
+ if (!retBool) {
+ CSFLogDebug(logTag, "%s ResetInputBuffers failed.", __FUNCTION__);
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ retBool = ResetOutputBuffers();
+ if (!retBool) {
+ CSFLogDebug(logTag, "%s ResetOutputBuffers failed.", __FUNCTION__);
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ mMediaCodecEncoder->isStarted = true;
+ }
+
+#ifdef WEBRTC_MEDIACODEC_DEBUG
+ uint32_t time = PR_IntervalNow();
+#endif
+
+ size_t sizeY = inputImage.allocated_size(webrtc::kYPlane);
+ size_t sizeUV = inputImage.allocated_size(webrtc::kUPlane);
+ size_t size = sizeY + 2 * sizeUV;
+
+ int inputIndex = mMediaCodecEncoder->DequeueInputBuffer(DECODER_TIMEOUT);
+ if (inputIndex == -1) {
+ CSFLogError(logTag, "%s dequeue input buffer failed", __FUNCTION__);
+ return inputIndex;
+ }
+
+#ifdef WEBRTC_MEDIACODEC_DEBUG
+ CSFLogDebug(logTag, "%s WebrtcMediaCodecVP8VideoEncoder::Encode() dequeue OMX input buffer took %u ms", __FUNCTION__, PR_IntervalToMilliseconds(PR_IntervalNow()-time));
+#endif
+
+ if (inputIndex >= 0) {
+ JNIEnv* env = jsjni_GetJNIForThread();
+ jobject buffer = env->GetObjectArrayElement(mInputBuffers, inputIndex);
+ void* directBuffer = env->GetDirectBufferAddress(buffer);
+
+ uint8_t* dstY = static_cast<uint8_t*>(directBuffer);
+ uint16_t* dstUV = reinterpret_cast<uint16_t*>(dstY + sizeY);
+
+ bool converted = I420toNV12(dstY, dstUV, inputImage);
+ if (!converted) {
+ CSFLogError(logTag, "%s WebrtcMediaCodecVP8VideoEncoder::Encode() convert input buffer to NV12 error.", __FUNCTION__);
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ env->DeleteLocalRef(buffer);
+
+#ifdef WEBRTC_MEDIACODEC_DEBUG
+ time = PR_IntervalNow();
+ CSFLogError(logTag, "%s queue input buffer inputIndex = %d", __FUNCTION__, inputIndex);
+#endif
+
+ mMediaCodecEncoder->QueueInputBuffer(inputIndex, 0, size, inputImage.render_time_ms() * PR_USEC_PER_MSEC /* ms to us */, 0);
+#ifdef WEBRTC_MEDIACODEC_DEBUG
+ CSFLogDebug(logTag, "%s WebrtcMediaCodecVP8VideoEncoder::Encode() queue input buffer took %u ms", __FUNCTION__, PR_IntervalToMilliseconds(PR_IntervalNow()-time));
+#endif
+ mEncodedImage._encodedWidth = inputImage.width();
+ mEncodedImage._encodedHeight = inputImage.height();
+ mEncodedImage._timeStamp = inputImage.timestamp();
+ mEncodedImage.capture_time_ms_ = inputImage.timestamp();
+
+ nsresult res;
+ BufferInfo::LocalRef bufferInfo;
+ res = BufferInfo::New(&bufferInfo);
+ if (NS_FAILED(res)) {
+ CSFLogDebug(logTag, "WebrtcMediaCodecVP8VideoEncoder::%s, BufferInfo::New return err = %d",
+ __FUNCTION__, (int)res);
+ return -1;
+ }
+
+ int32_t outputIndex = mMediaCodecEncoder->DequeueOutputBuffer(bufferInfo);
+
+ if (outputIndex == MediaCodec::INFO_TRY_AGAIN_LATER) {
+ // Not an error: output not available yet. Try later.
+ CSFLogDebug(logTag, "%s dequeue output buffer try again:%d", __FUNCTION__, outputIndex);
+ } else if (outputIndex == MediaCodec::INFO_OUTPUT_FORMAT_CHANGED) {
+ // handle format change
+ CSFLogDebug(logTag, "%s dequeue output buffer format changed:%d", __FUNCTION__, outputIndex);
+ } else if (outputIndex == MediaCodec::INFO_OUTPUT_BUFFERS_CHANGED) {
+ CSFLogDebug(logTag, "%s dequeue output buffer changed:%d", __FUNCTION__, outputIndex);
+ mMediaCodecEncoder->GetOutputBuffers();
+ } else if (outputIndex < 0) {
+ CSFLogDebug(logTag, "%s dequeue output buffer unknow error:%d", __FUNCTION__, outputIndex);
+ } else {
+#ifdef WEBRTC_MEDIACODEC_DEBUG
+ CSFLogDebug(logTag, "%s dequeue output buffer return status is %d took %u ms", __FUNCTION__, outputIndex, PR_IntervalToMilliseconds(PR_IntervalNow()-time));
+#endif
+
+ JNIEnv* env = jsjni_GetJNIForThread();
+ jobject buffer = env->GetObjectArrayElement(mOutputBuffers, outputIndex);
+ if (buffer) {
+ int32_t offset;
+ bufferInfo->Offset(&offset);
+ int32_t flags;
+ bufferInfo->Flags(&flags);
+
+ // The buffer will be null on Android L if we are decoding to a Surface
+ void* directBuffer = reinterpret_cast<uint8_t*>(env->GetDirectBufferAddress(buffer)) + offset;
+
+ if (flags == MediaCodec::BUFFER_FLAG_SYNC_FRAME) {
+ mEncodedImage._frameType = webrtc::kKeyFrame;
+ } else {
+ mEncodedImage._frameType = webrtc::kDeltaFrame;
+ }
+ mEncodedImage._completeFrame = true;
+
+ int32_t size;
+ bufferInfo->Size(&size);
+#ifdef WEBRTC_MEDIACODEC_DEBUG
+ CSFLogDebug(logTag, "%s dequeue output buffer ok, index:%d, buffer size = %d, buffer offset = %d, flags = %d", __FUNCTION__, outputIndex, size, offset, flags);
+#endif
+
+ if(VerifyAndAllocate(size) == -1) {
+ CSFLogDebug(logTag, "%s VerifyAndAllocate buffers failed", __FUNCTION__);
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ mEncodedImage._length = size;
+
+          // XXX It's too bad the MediaCodec API forces us to memcpy this...
+          // We should find a way to 'hold' the buffer or transfer it from inputImage (ping-pong
+          // buffers, or select them from a small pool)
+ memcpy(mEncodedImage._buffer, directBuffer, mEncodedImage._length);
+
+ webrtc::CodecSpecificInfo info;
+ info.codecType = webrtc::kVideoCodecVP8;
+ info.codecSpecific.VP8.pictureId = -1;
+ info.codecSpecific.VP8.tl0PicIdx = -1;
+ info.codecSpecific.VP8.keyIdx = -1;
+ info.codecSpecific.VP8.temporalIdx = 1;
+
+ // Generate a header describing a single fragment.
+ webrtc::RTPFragmentationHeader header;
+ memset(&header, 0, sizeof(header));
+ header.VerifyAndAllocateFragmentationHeader(1);
+ header.fragmentationLength[0] = mEncodedImage._length;
+
+ mCallback->Encoded(mEncodedImage, &info, &header);
+
+ mMediaCodecEncoder->ReleaseOutputBuffer(outputIndex, false);
+ env->DeleteLocalRef(buffer);
+ }
+ }
+ }
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t WebrtcMediaCodecVP8VideoEncoder::RegisterEncodeCompleteCallback(webrtc::EncodedImageCallback* callback) {
+ CSFLogDebug(logTag, "%s ", __FUNCTION__);
+ mCallback = callback;
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t WebrtcMediaCodecVP8VideoEncoder::Release() {
+
+ CSFLogDebug(logTag, "%s ", __FUNCTION__);
+ delete mMediaCodecEncoder;
+ mMediaCodecEncoder = nullptr;
+
+ delete [] mEncodedImage._buffer;
+ mEncodedImage._buffer = nullptr;
+ mEncodedImage._size = 0;
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+WebrtcMediaCodecVP8VideoEncoder::~WebrtcMediaCodecVP8VideoEncoder() {
+ CSFLogDebug(logTag, "%s ", __FUNCTION__);
+ Release();
+}
+
+int32_t WebrtcMediaCodecVP8VideoEncoder::SetChannelParameters(uint32_t packetLoss, int64_t rtt) {
+ CSFLogDebug(logTag, "%s ", __FUNCTION__);
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t WebrtcMediaCodecVP8VideoEncoder::SetRates(uint32_t newBitRate, uint32_t frameRate) {
+ CSFLogDebug(logTag, "%s ", __FUNCTION__);
+ if (!mMediaCodecEncoder) {
+ return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+
+ // XXX
+  // 1. Implement MediaCodec's setParameters method.
+  // 2. Find a way to instantiate a Java Bundle instance as a parameter for MediaCodec's setParameters method.
+ // mMediaCodecEncoder->setParameters
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// Decoder.
+WebrtcMediaCodecVP8VideoDecoder::WebrtcMediaCodecVP8VideoDecoder()
+ : mCallback(nullptr)
+ , mFrameWidth(0)
+ , mFrameHeight(0)
+ , mMediaCodecDecoder(nullptr) {
+ CSFLogDebug(logTag, "%s ", __FUNCTION__);
+}
+
+bool WebrtcMediaCodecVP8VideoDecoder::ResetInputBuffers() {
+ mInputBuffers = mMediaCodecDecoder->GetInputBuffers();
+
+ if (!mInputBuffers)
+ return false;
+
+ return true;
+}
+
+bool WebrtcMediaCodecVP8VideoDecoder::ResetOutputBuffers() {
+ mOutputBuffers = mMediaCodecDecoder->GetOutputBuffers();
+
+ if (!mOutputBuffers)
+ return false;
+
+ return true;
+}
+
+
+int32_t WebrtcMediaCodecVP8VideoDecoder::InitDecode(
+ const webrtc::VideoCodec* codecSettings,
+ int32_t numberOfCores) {
+
+ if (!mMediaCodecDecoder) {
+ mMediaCodecDecoder = new WebrtcAndroidMediaCodec();
+ }
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t WebrtcMediaCodecVP8VideoDecoder::Decode(
+ const webrtc::EncodedImage& inputImage,
+ bool missingFrames,
+ const webrtc::RTPFragmentationHeader* fragmentation,
+ const webrtc::CodecSpecificInfo* codecSpecificInfo,
+ int64_t renderTimeMs) {
+
+ CSFLogDebug(logTag, "%s, renderTimeMs = %lld ", __FUNCTION__, renderTimeMs);
+
+ if (inputImage._length== 0 || !inputImage._buffer) {
+ CSFLogDebug(logTag, "%s, input Image invalid. length = %d", __FUNCTION__, inputImage._length);
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ if (inputImage._frameType == webrtc::kKeyFrame) {
+ CSFLogDebug(logTag, "%s, inputImage is Golden frame",
+ __FUNCTION__);
+ mFrameWidth = inputImage._encodedWidth;
+ mFrameHeight = inputImage._encodedHeight;
+ }
+
+ if (!mMediaCodecDecoder->isStarted) {
+ if (mFrameWidth == 0 || mFrameHeight == 0) {
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ mMediaCodecDecoder->SetDecoderCallback(mCallback);
+ nsresult res = mMediaCodecDecoder->Configure(mFrameWidth, mFrameHeight, nullptr, 0, MEDIACODEC_VIDEO_MIME_VP8, false /* decoder */);
+
+ if (res != NS_OK) {
+ CSFLogDebug(logTag, "%s, decoder configure return err = %d",
+ __FUNCTION__, (int)res);
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ res = mMediaCodecDecoder->Start();
+
+ if (NS_FAILED(res)) {
+ mMediaCodecDecoder->isStarted = false;
+ CSFLogDebug(logTag, "%s start decoder. err = %d", __FUNCTION__, (int)res);
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ bool retBool = ResetInputBuffers();
+ if (!retBool) {
+ CSFLogDebug(logTag, "%s ResetInputBuffers failed.", __FUNCTION__);
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ retBool = ResetOutputBuffers();
+ if (!retBool) {
+ CSFLogDebug(logTag, "%s ResetOutputBuffers failed.", __FUNCTION__);
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ mMediaCodecDecoder->isStarted = true;
+ }
+#ifdef WEBRTC_MEDIACODEC_DEBUG
+ uint32_t time = PR_IntervalNow();
+ CSFLogDebug(logTag, "%s start decoder took %u ms", __FUNCTION__, PR_IntervalToMilliseconds(PR_IntervalNow()-time));
+#endif
+
+ bool feedFrame = true;
+ int32_t ret = WEBRTC_VIDEO_CODEC_ERROR;
+
+ while (feedFrame) {
+ ret = mMediaCodecDecoder->FeedMediaCodecInput(inputImage, renderTimeMs);
+ feedFrame = (ret == -1);
+ }
+
+ CSFLogDebug(logTag, "%s end, ret = %d", __FUNCTION__, ret);
+
+ return ret;
+}
+
+void WebrtcMediaCodecVP8VideoDecoder::DecodeFrame(EncodedFrame* frame) {
+ CSFLogDebug(logTag, "%s ", __FUNCTION__);
+}
+
+int32_t WebrtcMediaCodecVP8VideoDecoder::RegisterDecodeCompleteCallback(webrtc::DecodedImageCallback* callback) {
+ CSFLogDebug(logTag, "%s ", __FUNCTION__);
+
+ mCallback = callback;
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t WebrtcMediaCodecVP8VideoDecoder::Release() {
+ CSFLogDebug(logTag, "%s ", __FUNCTION__);
+
+ delete mMediaCodecDecoder;
+ mMediaCodecDecoder = nullptr;
+
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+WebrtcMediaCodecVP8VideoDecoder::~WebrtcMediaCodecVP8VideoDecoder() {
+ CSFLogDebug(logTag, "%s ", __FUNCTION__);
+
+ Release();
+}
+
+int32_t WebrtcMediaCodecVP8VideoDecoder::Reset() {
+ CSFLogDebug(logTag, "%s ", __FUNCTION__);
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+}
diff --git a/media/webrtc/signaling/src/media-conduit/WebrtcMediaCodecVP8VideoCodec.h b/media/webrtc/signaling/src/media-conduit/WebrtcMediaCodecVP8VideoCodec.h
new file mode 100644
index 000000000..9d7e900fe
--- /dev/null
+++ b/media/webrtc/signaling/src/media-conduit/WebrtcMediaCodecVP8VideoCodec.h
@@ -0,0 +1,114 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef WebrtcMediaCodecVP8VideoCodec_h__
+#define WebrtcMediaCodecVP8VideoCodec_h__
+
+#include "mozilla/Mutex.h"
+#include "nsThreadUtils.h"
+#include "nsAutoPtr.h"
+
+#include "MediaConduitInterface.h"
+#include "AudioConduit.h"
+#include "VideoConduit.h"
+
+namespace mozilla {
+
+struct EncodedFrame {
+ uint32_t width_;
+ uint32_t height_;
+ uint32_t timeStamp_;
+ uint64_t decode_timestamp_;
+};
+
+class WebrtcAndroidMediaCodec;
+
+class WebrtcMediaCodecVP8VideoEncoder : public WebrtcVideoEncoder {
+public:
+ WebrtcMediaCodecVP8VideoEncoder();
+
+ virtual ~WebrtcMediaCodecVP8VideoEncoder() override;
+
+ // Implement VideoEncoder interface.
+ virtual uint64_t PluginID() const override { return 0; }
+
+ virtual int32_t InitEncode(const webrtc::VideoCodec* codecSettings,
+ int32_t numberOfCores,
+ size_t maxPayloadSize) override;
+
+ virtual int32_t Encode(const webrtc::I420VideoFrame& inputImage,
+ const webrtc::CodecSpecificInfo* codecSpecificInfo,
+ const std::vector<webrtc::VideoFrameType>* frame_types) override;
+
+ virtual int32_t RegisterEncodeCompleteCallback(webrtc::EncodedImageCallback* callback) override;
+
+ virtual int32_t Release() override;
+
+ virtual int32_t SetChannelParameters(uint32_t packetLoss, int64_t rtt) override;
+
+ virtual int32_t SetRates(uint32_t newBitRate, uint32_t frameRate) override;
+
+private:
+ int32_t VerifyAndAllocate(const uint32_t minimumSize);
+ bool ResetInputBuffers();
+ bool ResetOutputBuffers();
+
+ size_t mMaxPayloadSize;
+ uint32_t mTimestamp;
+ webrtc::EncodedImage mEncodedImage;
+ webrtc::EncodedImageCallback* mCallback;
+ uint32_t mFrameWidth;
+ uint32_t mFrameHeight;
+
+ WebrtcAndroidMediaCodec* mMediaCodecEncoder;
+
+ jobjectArray mInputBuffers;
+ jobjectArray mOutputBuffers;
+};
+
+class WebrtcMediaCodecVP8VideoDecoder : public WebrtcVideoDecoder {
+public:
+ WebrtcMediaCodecVP8VideoDecoder();
+
+ virtual ~WebrtcMediaCodecVP8VideoDecoder() override;
+
+ // Implement VideoDecoder interface.
+ virtual uint64_t PluginID() const override { return 0; }
+
+ virtual int32_t InitDecode(const webrtc::VideoCodec* codecSettings,
+ int32_t numberOfCores) override;
+
+ virtual int32_t Decode(const webrtc::EncodedImage& inputImage,
+ bool missingFrames,
+ const webrtc::RTPFragmentationHeader* fragmentation,
+ const webrtc::CodecSpecificInfo*
+ codecSpecificInfo = NULL,
+ int64_t renderTimeMs = -1) override;
+
+ virtual int32_t RegisterDecodeCompleteCallback(webrtc::DecodedImageCallback* callback) override;
+
+ virtual int32_t Release() override;
+
+ virtual int32_t Reset() override;
+
+private:
+ void DecodeFrame(EncodedFrame* frame);
+ void RunCallback();
+ bool ResetInputBuffers();
+ bool ResetOutputBuffers();
+
+ webrtc::DecodedImageCallback* mCallback;
+
+ uint32_t mFrameWidth;
+ uint32_t mFrameHeight;
+
+ WebrtcAndroidMediaCodec* mMediaCodecDecoder;
+ jobjectArray mInputBuffers;
+ jobjectArray mOutputBuffers;
+
+};
+
+}
+
+#endif // WebrtcMediaCodecVP8VideoCodec_h__
diff --git a/media/webrtc/signaling/src/media-conduit/WebrtcOMXH264VideoCodec.cpp b/media/webrtc/signaling/src/media-conduit/WebrtcOMXH264VideoCodec.cpp
new file mode 100644
index 000000000..dc052f4e0
--- /dev/null
+++ b/media/webrtc/signaling/src/media-conduit/WebrtcOMXH264VideoCodec.cpp
@@ -0,0 +1,1253 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "CSFLog.h"
+
+#include "WebrtcOMXH264VideoCodec.h"
+
+// Android/Stagefright
+#include <avc_utils.h>
+#include <binder/ProcessState.h>
+#include <foundation/ABuffer.h>
+#include <foundation/AMessage.h>
+#include <gui/Surface.h>
+#include <media/ICrypto.h>
+#include <media/stagefright/MediaCodec.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MetaData.h>
+#include <OMX_Component.h>
+using namespace android;
+
+// WebRTC
+//#include "webrtc/common_video/interface/texture_video_frame.h"
+#include "webrtc/video_engine/include/vie_external_codec.h"
+#include "runnable_utils.h"
+
+// Gecko
+#if defined(MOZ_WIDGET_GONK) && ANDROID_VERSION >= 21
+#include "GonkBufferQueueProducer.h"
+#endif
+#include "GonkNativeWindow.h"
+#include "GrallocImages.h"
+#include "mozilla/Atomics.h"
+#include "mozilla/Mutex.h"
+#include "nsThreadUtils.h"
+#include "OMXCodecWrapper.h"
+#include "TextureClient.h"
+#include "mozilla/IntegerPrintfMacros.h"
+
+#define DEQUEUE_BUFFER_TIMEOUT_US (100 * 1000ll) // 100ms.
+#define START_DEQUEUE_BUFFER_TIMEOUT_US (10 * DEQUEUE_BUFFER_TIMEOUT_US) // 1s.
+#define DRAIN_THREAD_TIMEOUT_US (1000 * 1000ll) // 1s.
+
+#define WOHVC_LOG_TAG "WebrtcOMXH264VideoCodec"
+#define CODEC_LOGV(...) CSFLogInfo(WOHVC_LOG_TAG, __VA_ARGS__)
+#define CODEC_LOGD(...) CSFLogDebug(WOHVC_LOG_TAG, __VA_ARGS__)
+#define CODEC_LOGI(...) CSFLogInfo(WOHVC_LOG_TAG, __VA_ARGS__)
+#define CODEC_LOGW(...) CSFLogWarn(WOHVC_LOG_TAG, __VA_ARGS__)
+#define CODEC_LOGE(...) CSFLogError(WOHVC_LOG_TAG, __VA_ARGS__)
+
+namespace mozilla {
+
+static const uint8_t kNALStartCode[] = { 0x00, 0x00, 0x00, 0x01 };
+enum {
+ kNALTypeIDR = 5,
+ kNALTypeSPS = 7,
+ kNALTypePPS = 8,
+};
+
+// NS_INLINE_DECL_THREADSAFE_REFCOUNTING() cannot be used directly in
+// ImageNativeHandle below because the return type of webrtc::NativeHandle
+// AddRef()/Release() conflicts with those defined in macro. To avoid another
+// copy/paste of ref-counting implementation here, this dummy base class
+// is created to provide another level of indirection.
+class DummyRefCountBase {
+public:
+ // Use the name of real class for logging.
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(DummyRefCountBase)
+protected:
+ // To make sure subclass will be deleted/destructed properly.
+ virtual ~DummyRefCountBase() {}
+};
+
+// This class implements 2 interfaces:
+// 1. webrtc::NativeHandle: to wrap layers::Image object so decoded frames can
+// be passed through WebRTC rendering pipeline using TextureVideoFrame.
+// 2. ImageHandle: for renderer to get the image object inside without knowledge
+// about webrtc::NativeHandle.
+class ImageNativeHandle final
+ : public webrtc::NativeHandle
+ , public DummyRefCountBase
+{
+public:
+ ImageNativeHandle(layers::Image* aImage)
+ : mImage(aImage)
+ {}
+
+ // Implement webrtc::NativeHandle.
+ virtual void* GetHandle() override { return mImage.get(); }
+
+ virtual int AddRef() override
+ {
+ return DummyRefCountBase::AddRef();
+ }
+
+ virtual int Release() override
+ {
+ return DummyRefCountBase::Release();
+ }
+
+private:
+ RefPtr<layers::Image> mImage;
+};
+
+struct EncodedFrame
+{
+ uint32_t mWidth;
+ uint32_t mHeight;
+ uint32_t mTimestamp;
+ int64_t mRenderTimeMs;
+};
+
+static void
+ShutdownThread(nsCOMPtr<nsIThread>& aThread)
+{
+ aThread->Shutdown();
+}
+
+// Base runnable class to repeatedly pull OMX output buffers in a separate thread.
+// How to use:
+// - implementing DrainOutput() to get output. Remember to return false to tell
+// drain not to pop input queue.
+// - call QueueInput() to schedule a run to drain output. The input, aFrame,
+//   should contain corresponding info such as image size and timestamps for
+// DrainOutput() implementation to construct data needed by encoded/decoded
+// callbacks.
+// TODO: Bug 997110 - Revisit queue/drain logic. Current design assumes that
+//       encoder only generates one output buffer per input frame and won't work
+// if encoder drops frames or generates multiple output per input.
+class OMXOutputDrain : public Runnable
+{
+public:
+ void Start() {
+ CODEC_LOGD("OMXOutputDrain starting");
+ MonitorAutoLock lock(mMonitor);
+ if (mThread == nullptr) {
+ NS_NewNamedThread("OMXOutputDrain", getter_AddRefs(mThread));
+ }
+ CODEC_LOGD("OMXOutputDrain started");
+ mEnding = false;
+ mThread->Dispatch(this, NS_DISPATCH_NORMAL);
+ }
+
+ void Stop() {
+ CODEC_LOGD("OMXOutputDrain stopping");
+ MonitorAutoLock lock(mMonitor);
+ mEnding = true;
+ lock.NotifyAll(); // In case Run() is waiting.
+
+ if (mThread != nullptr) {
+ MonitorAutoUnlock unlock(mMonitor);
+ CODEC_LOGD("OMXOutputDrain thread shutdown");
+ NS_DispatchToMainThread(
+ WrapRunnableNM<decltype(&ShutdownThread),
+ nsCOMPtr<nsIThread> >(&ShutdownThread, mThread));
+ mThread = nullptr;
+ }
+ CODEC_LOGD("OMXOutputDrain stopped");
+ }
+
+ void QueueInput(const EncodedFrame& aFrame)
+ {
+ MonitorAutoLock lock(mMonitor);
+
+ MOZ_ASSERT(mThread);
+
+ mInputFrames.push(aFrame);
+ // Notify Run() about queued input and it can start working.
+ lock.NotifyAll();
+ }
+
+ NS_IMETHOD Run() override
+ {
+ MonitorAutoLock lock(mMonitor);
+ if (mEnding) {
+ return NS_OK;
+ }
+ MOZ_ASSERT(mThread);
+
+ while (true) {
+ if (mInputFrames.empty()) {
+ // Wait for new input.
+ lock.Wait();
+ }
+
+ if (mEnding) {
+ CODEC_LOGD("OMXOutputDrain Run() ending");
+ // Stop draining.
+ break;
+ }
+
+ MOZ_ASSERT(!mInputFrames.empty());
+ {
+ // Release monitor while draining because it's blocking.
+ MonitorAutoUnlock unlock(mMonitor);
+ DrainOutput();
+ }
+ }
+
+ CODEC_LOGD("OMXOutputDrain Ended");
+ return NS_OK;
+ }
+
+protected:
+ OMXOutputDrain()
+ : mMonitor("OMXOutputDrain monitor")
+ , mEnding(false)
+ {}
+
+ // Drain output buffer for input frame queue mInputFrames.
+ // mInputFrames contains info such as size and time of the input frames.
+ // We have to give a queue to handle encoder frame skips - we can input 10
+// frames and get one back. NOTE: any access of mInputFrames MUST be preceded
+//   by locking mMonitor!
+
+ // Blocks waiting for decoded buffers, but for a limited period because
+ // we need to check for shutdown.
+ virtual bool DrainOutput() = 0;
+
+protected:
+ // This monitor protects all things below it, and is also used to
+ // wait/notify queued input.
+ Monitor mMonitor;
+ std::queue<EncodedFrame> mInputFrames;
+
+private:
+ // also protected by mMonitor
+ nsCOMPtr<nsIThread> mThread;
+ bool mEnding;
+};
+
+// Assumption: SPS is first paramset or is not present
+static bool IsParamSets(uint8_t* aData, size_t aSize)
+{
+ MOZ_ASSERT(aData && aSize > sizeof(kNALStartCode));
+ return (aData[sizeof(kNALStartCode)] & 0x1f) == kNALTypeSPS;
+}
+
+// get the length of any pre-pended SPS/PPS's
+static size_t ParamSetLength(uint8_t* aData, size_t aSize)
+{
+ const uint8_t* data = aData;
+ size_t size = aSize;
+ const uint8_t* nalStart = nullptr;
+ size_t nalSize = 0;
+ while (getNextNALUnit(&data, &size, &nalStart, &nalSize, true) == OK) {
+ if ((*nalStart & 0x1f) != kNALTypeSPS &&
+ (*nalStart & 0x1f) != kNALTypePPS) {
+ MOZ_ASSERT(nalStart - sizeof(kNALStartCode) >= aData);
+ return (nalStart - sizeof(kNALStartCode)) - aData; // SPS/PPS/iframe
+ }
+ }
+ return aSize; // it's only SPS/PPS
+}
+
+// H.264 decoder using stagefright.
+// It implements gonk native window callback to receive buffers from
+// MediaCodec::RenderOutputBufferAndRelease().
+class WebrtcOMXDecoder final : public GonkNativeWindowNewFrameCallback
+{
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(WebrtcOMXDecoder)
+
+private:
+ virtual ~WebrtcOMXDecoder()
+ {
+ CODEC_LOGD("WebrtcOMXH264VideoDecoder:%p OMX destructor", this);
+ if (mStarted) {
+ Stop();
+ }
+ if (mCodec != nullptr) {
+ mCodec->release();
+ mCodec.clear();
+ }
+ mLooper.clear();
+ }
+
+public:
+ WebrtcOMXDecoder(const char* aMimeType,
+ webrtc::DecodedImageCallback* aCallback)
+ : mWidth(0)
+ , mHeight(0)
+ , mStarted(false)
+ , mCallback(aCallback)
+ , mDecodedFrameLock("WebRTC decoded frame lock")
+ , mEnding(false)
+ {
+ // Create binder thread pool required by stagefright.
+ android::ProcessState::self()->startThreadPool();
+
+ mLooper = new ALooper;
+ mLooper->start();
+ CODEC_LOGD("WebrtcOMXH264VideoDecoder:%p creating decoder", this);
+ mCodec = MediaCodec::CreateByType(mLooper, aMimeType, false /* encoder */);
+ CODEC_LOGD("WebrtcOMXH264VideoDecoder:%p OMX created", this);
+ }
+
+ // Find SPS in input data and extract picture width and height if found.
+ static status_t ExtractPicDimensions(uint8_t* aData, size_t aSize,
+ int32_t* aWidth, int32_t* aHeight)
+ {
+ MOZ_ASSERT(aData && aSize > sizeof(kNALStartCode));
+ if ((aData[sizeof(kNALStartCode)] & 0x1f) != kNALTypeSPS) {
+ return ERROR_MALFORMED;
+ }
+ sp<ABuffer> sps = new ABuffer(&aData[sizeof(kNALStartCode)], aSize - sizeof(kNALStartCode));
+ FindAVCDimensions(sps, aWidth, aHeight);
+ return OK;
+ }
+
+ // Configure decoder using image width/height.
+ status_t ConfigureWithPicDimensions(int32_t aWidth, int32_t aHeight)
+ {
+ MOZ_ASSERT(mCodec != nullptr);
+ if (mCodec == nullptr) {
+ return INVALID_OPERATION;
+ }
+
+ CODEC_LOGD("OMX:%p decoder width:%d height:%d", this, aWidth, aHeight);
+
+ sp<AMessage> config = new AMessage();
+ config->setString("mime", MEDIA_MIMETYPE_VIDEO_AVC);
+ config->setInt32("width", aWidth);
+ config->setInt32("height", aHeight);
+ mWidth = aWidth;
+ mHeight = aHeight;
+
+ sp<Surface> surface = nullptr;
+#if defined(MOZ_WIDGET_GONK) && ANDROID_VERSION >= 21
+ sp<IGraphicBufferProducer> producer;
+ sp<IGonkGraphicBufferConsumer> consumer;
+ GonkBufferQueue::createBufferQueue(&producer, &consumer);
+ mNativeWindow = new GonkNativeWindow(consumer);
+#else
+ mNativeWindow = new GonkNativeWindow();
+#endif
+ if (mNativeWindow.get()) {
+ // listen to buffers queued by MediaCodec::RenderOutputBufferAndRelease().
+ mNativeWindow->setNewFrameCallback(this);
+ // XXX remove buffer changes after a better solution lands - bug 1009420
+#if defined(MOZ_WIDGET_GONK) && ANDROID_VERSION >= 21
+ static_cast<GonkBufferQueueProducer*>(producer.get())->setSynchronousMode(false);
+ // More spare buffers to avoid OMX decoder waiting for native window
+ consumer->setMaxAcquiredBufferCount(WEBRTC_OMX_H264_MIN_DECODE_BUFFERS);
+ surface = new Surface(producer);
+#else
+ sp<GonkBufferQueue> bq = mNativeWindow->getBufferQueue();
+ bq->setSynchronousMode(false);
+ // More spare buffers to avoid OMX decoder waiting for native window
+ bq->setMaxAcquiredBufferCount(WEBRTC_OMX_H264_MIN_DECODE_BUFFERS);
+ surface = new Surface(bq);
+#endif
+ }
+ status_t result = mCodec->configure(config, surface, nullptr, 0);
+ if (result == OK) {
+ CODEC_LOGD("OMX:%p decoder configured", this);
+ result = Start();
+ }
+ return result;
+ }
+
+ status_t
+ FillInput(const webrtc::EncodedImage& aEncoded, bool aIsFirstFrame,
+ int64_t& aRenderTimeMs)
+ {
+ MOZ_ASSERT(mCodec != nullptr && aEncoded._buffer && aEncoded._length > 0);
+ if (mCodec == nullptr || !aEncoded._buffer || aEncoded._length == 0) {
+ return INVALID_OPERATION;
+ }
+
+ // Break input encoded data into NALUs and send each one to decode.
+ // 8x10 decoder doesn't allow picture coding NALs to be in the same buffer
+ // with SPS/PPS (BUFFER_FLAG_CODECCONFIG) per QC
+ const uint8_t* data = aEncoded._buffer;
+ size_t size = aEncoded._length;
+ const uint8_t* nalStart = nullptr;
+ size_t nalSize = 0;
+ status_t err = OK;
+
+ // this returns a pointer to the NAL byte (after the StartCode)
+ while (getNextNALUnit(&data, &size, &nalStart, &nalSize, true) == OK) {
+ // Individual NALU inherits metadata from input encoded data.
+ webrtc::EncodedImage nalu(aEncoded);
+
+ nalu._buffer = const_cast<uint8_t*>(nalStart) - sizeof(kNALStartCode);
+ MOZ_ASSERT(nalu._buffer >= aEncoded._buffer);
+ nalu._length = nalSize + sizeof(kNALStartCode);
+ MOZ_ASSERT(nalu._buffer + nalu._length <= aEncoded._buffer + aEncoded._length);
+
+ size_t index;
+ err = mCodec->dequeueInputBuffer(&index,
+ aIsFirstFrame ? START_DEQUEUE_BUFFER_TIMEOUT_US : DEQUEUE_BUFFER_TIMEOUT_US);
+ if (err != OK) {
+ if (err != -EAGAIN) {
+ CODEC_LOGE("decode dequeue input buffer error:%d", err);
+ } else {
+ CODEC_LOGE("decode dequeue 100ms without a buffer (EAGAIN)");
+ }
+ return err;
+ }
+
+ // Prepend start code to buffer.
+ MOZ_ASSERT(memcmp(nalu._buffer, kNALStartCode, sizeof(kNALStartCode)) == 0);
+ const sp<ABuffer>& omxIn = mInputBuffers.itemAt(index);
+ MOZ_ASSERT(omxIn->capacity() >= nalu._length);
+ omxIn->setRange(0, nalu._length);
+ // Copying is needed because MediaCodec API doesn't support externally
+ // allocated buffer as input.
+ uint8_t* dst = omxIn->data();
+ memcpy(dst, nalu._buffer, nalu._length);
+ int64_t inputTimeUs = (nalu._timeStamp * 1000ll) / 90; // 90kHz -> us.
+ // Assign input flags according to input buffer NALU and frame types.
+ uint32_t flags;
+ int nalType = dst[sizeof(kNALStartCode)] & 0x1f;
+ switch (nalType) {
+ case kNALTypeSPS:
+ case kNALTypePPS:
+ flags = MediaCodec::BUFFER_FLAG_CODECCONFIG;
+ break;
+ case kNALTypeIDR:
+ flags = MediaCodec::BUFFER_FLAG_SYNCFRAME;
+ break;
+ default:
+ flags = 0;
+ break;
+ }
+ CODEC_LOGD("Decoder input: %d bytes (NAL 0x%02x), time %lld (%u), flags 0x%x",
+ nalu._length, dst[sizeof(kNALStartCode)], inputTimeUs, nalu._timeStamp, flags);
+ err = mCodec->queueInputBuffer(index, 0, nalu._length, inputTimeUs, flags);
+ if (err == OK && !(flags & MediaCodec::BUFFER_FLAG_CODECCONFIG)) {
+ if (mOutputDrain == nullptr) {
+ mOutputDrain = new OutputDrain(this);
+ mOutputDrain->Start();
+ }
+ EncodedFrame frame;
+ frame.mWidth = mWidth;
+ frame.mHeight = mHeight;
+ frame.mTimestamp = nalu._timeStamp;
+ frame.mRenderTimeMs = aRenderTimeMs;
+ mOutputDrain->QueueInput(frame);
+ }
+ }
+
+ return err;
+ }
+
+ status_t
+ DrainOutput(std::queue<EncodedFrame>& aInputFrames, Monitor& aMonitor)
+ {
+ MOZ_ASSERT(mCodec != nullptr);
+ if (mCodec == nullptr) {
+ return INVALID_OPERATION;
+ }
+
+ size_t index = 0;
+ size_t outOffset = 0;
+ size_t outSize = 0;
+ int64_t outTime = -1ll;
+ uint32_t outFlags = 0;
+ status_t err = mCodec->dequeueOutputBuffer(&index, &outOffset, &outSize,
+ &outTime, &outFlags,
+ DRAIN_THREAD_TIMEOUT_US);
+ switch (err) {
+ case OK:
+ break;
+ case -EAGAIN:
+ // Not an error: output not available yet. Try later.
+ CODEC_LOGI("decode dequeue OMX output buffer timed out. Try later.");
+ return err;
+ case INFO_FORMAT_CHANGED:
+ // Not an error: will get this value when OMX output buffer is enabled,
+ // or when input size changed.
+ CODEC_LOGD("decode dequeue OMX output buffer format change");
+ return err;
+ case INFO_OUTPUT_BUFFERS_CHANGED:
+ // Not an error: will get this value when OMX output buffer changed
+ // (probably because of input size change).
+ CODEC_LOGD("decode dequeue OMX output buffer change");
+ err = mCodec->getOutputBuffers(&mOutputBuffers);
+ MOZ_ASSERT(err == OK);
+ return INFO_OUTPUT_BUFFERS_CHANGED;
+ default:
+ CODEC_LOGE("decode dequeue OMX output buffer error:%d", err);
+ // Return OK to instruct OutputDrain to drop input from queue.
+ MonitorAutoLock lock(aMonitor);
+ aInputFrames.pop();
+ return OK;
+ }
+
+ CODEC_LOGD("Decoder output: %d bytes, offset %u, time %lld, flags 0x%x",
+ outSize, outOffset, outTime, outFlags);
+ if (mCallback) {
+ EncodedFrame frame;
+ {
+ MonitorAutoLock lock(aMonitor);
+ frame = aInputFrames.front();
+ aInputFrames.pop();
+ }
+ {
+ // Store info of this frame. OnNewFrame() will need the timestamp later.
+ MutexAutoLock lock(mDecodedFrameLock);
+ if (mEnding) {
+ mCodec->releaseOutputBuffer(index);
+ return err;
+ }
+ mDecodedFrames.push(frame);
+ }
+ // Ask codec to queue buffer back to native window. OnNewFrame() will be
+ // called.
+ mCodec->renderOutputBufferAndRelease(index);
+ // Once consumed, buffer will be queued back to GonkNativeWindow for codec
+ // to dequeue/use.
+ } else {
+ mCodec->releaseOutputBuffer(index);
+ }
+
+ return err;
+ }
+
+ // Will be called when MediaCodec::RenderOutputBufferAndRelease() returns
+ // buffers back to native window for rendering.
+ void OnNewFrame() override
+ {
+ RefPtr<layers::TextureClient> buffer = mNativeWindow->getCurrentBuffer();
+ if (!buffer) {
+ CODEC_LOGE("Decoder NewFrame: Get null buffer");
+ return;
+ }
+
+ gfx::IntSize picSize(buffer->GetSize());
+ nsAutoPtr<layers::GrallocImage> grallocImage(new layers::GrallocImage());
+ grallocImage->AdoptData(buffer, picSize);
+
+ // Get timestamp of the frame about to render.
+ int64_t timestamp = -1;
+ int64_t renderTimeMs = -1;
+ {
+ MutexAutoLock lock(mDecodedFrameLock);
+ if (mDecodedFrames.empty()) {
+ return;
+ }
+ EncodedFrame decoded = mDecodedFrames.front();
+ timestamp = decoded.mTimestamp;
+ renderTimeMs = decoded.mRenderTimeMs;
+ mDecodedFrames.pop();
+ }
+ MOZ_ASSERT(timestamp >= 0 && renderTimeMs >= 0);
+
+ CODEC_LOGD("Decoder NewFrame: %dx%d, timestamp %lld, renderTimeMs %lld",
+ picSize.width, picSize.height, timestamp, renderTimeMs);
+
+ nsAutoPtr<webrtc::I420VideoFrame> videoFrame(new webrtc::I420VideoFrame(
+ new ImageNativeHandle(grallocImage.forget()),
+ picSize.width,
+ picSize.height,
+ timestamp,
+ renderTimeMs));
+ if (videoFrame != nullptr) {
+ mCallback->Decoded(*videoFrame);
+ }
+ }
+
+private:
+ class OutputDrain : public OMXOutputDrain
+ {
+ public:
+ OutputDrain(WebrtcOMXDecoder* aOMX)
+ : OMXOutputDrain()
+ , mOMX(aOMX)
+ {}
+
+ protected:
+ virtual bool DrainOutput() override
+ {
+ return (mOMX->DrainOutput(mInputFrames, mMonitor) == OK);
+ }
+
+ private:
+ WebrtcOMXDecoder* mOMX;
+ };
+
+ status_t Start()
+ {
+ MOZ_ASSERT(!mStarted);
+ if (mStarted) {
+ return OK;
+ }
+
+ {
+ MutexAutoLock lock(mDecodedFrameLock);
+ mEnding = false;
+ }
+ status_t err = mCodec->start();
+ if (err == OK) {
+ mStarted = true;
+ mCodec->getInputBuffers(&mInputBuffers);
+ mCodec->getOutputBuffers(&mOutputBuffers);
+ }
+
+ return err;
+ }
+
+ status_t Stop()
+ {
+ MOZ_ASSERT(mStarted);
+ if (!mStarted) {
+ return OK;
+ }
+
+ CODEC_LOGD("OMXOutputDrain decoder stopping");
+ // Drop all 'pending to render' frames.
+ {
+ MutexAutoLock lock(mDecodedFrameLock);
+ mEnding = true;
+ while (!mDecodedFrames.empty()) {
+ mDecodedFrames.pop();
+ }
+ }
+
+ if (mOutputDrain != nullptr) {
+ CODEC_LOGD("decoder's OutputDrain stopping");
+ mOutputDrain->Stop();
+ mOutputDrain = nullptr;
+ }
+
+ status_t err = mCodec->stop();
+ if (err == OK) {
+ mInputBuffers.clear();
+ mOutputBuffers.clear();
+ mStarted = false;
+ } else {
+ MOZ_ASSERT(false);
+ }
+ CODEC_LOGD("OMXOutputDrain decoder stopped");
+ return err;
+ }
+
+ sp<ALooper> mLooper;
+ sp<MediaCodec> mCodec; // OMXCodec
+ int mWidth;
+ int mHeight;
+ android::Vector<sp<ABuffer> > mInputBuffers;
+ android::Vector<sp<ABuffer> > mOutputBuffers;
+ bool mStarted;
+
+ sp<GonkNativeWindow> mNativeWindow;
+
+ RefPtr<OutputDrain> mOutputDrain;
+ webrtc::DecodedImageCallback* mCallback;
+
+ Mutex mDecodedFrameLock; // To protect mDecodedFrames and mEnding
+ std::queue<EncodedFrame> mDecodedFrames;
+ bool mEnding;
+};
+
+class EncOutputDrain : public OMXOutputDrain
+{
+public:
+ EncOutputDrain(OMXVideoEncoder* aOMX, webrtc::EncodedImageCallback* aCallback)
+ : OMXOutputDrain()
+ , mOMX(aOMX)
+ , mCallback(aCallback)
+ , mIsPrevFrameParamSets(false)
+ {}
+
+protected:
+ virtual bool DrainOutput() override
+ {
+ nsTArray<uint8_t> output;
+ int64_t timeUs = -1ll;
+ int flags = 0;
+ nsresult rv = mOMX->GetNextEncodedFrame(&output, &timeUs, &flags,
+ DRAIN_THREAD_TIMEOUT_US);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ // Fail to get encoded frame. The corresponding input frame should be
+ // removed.
+ // We'll treat this like a skipped frame
+ return true;
+ }
+
+ if (output.Length() == 0) {
+ // No encoded data yet. Try later.
+ CODEC_LOGD("OMX: (encode no output available this time)");
+ return false;
+ }
+
+ // Conversion to us rounds down, so we need to round up for us->90KHz
+ uint32_t target_timestamp = (timeUs * 90ll + 999) / 1000; // us -> 90KHz
+ // 8x10 v2.0 encoder doesn't set this reliably:
+ //bool isParamSets = (flags & MediaCodec::BUFFER_FLAG_CODECCONFIG);
+ // Assume that SPS/PPS will be at the start of any buffer
+ // Assume PPS will not be in a separate buffer - SPS/PPS or SPS/PPS/iframe
+ bool isParamSets = IsParamSets(output.Elements(), output.Length());
+ bool isIFrame = (flags & MediaCodec::BUFFER_FLAG_SYNCFRAME);
+ CODEC_LOGD("OMX: encoded frame (%d): time %lld (%u), flags x%x",
+ output.Length(), timeUs, target_timestamp, flags);
+ // Should not be parameter sets and I-frame at the same time.
+ // Except that it is possible, apparently, after an encoder re-config (bug 1063883)
+ // MOZ_ASSERT(!(isParamSets && isIFrame));
+
+ if (mCallback) {
+ // Implementation here assumes encoder output to be a buffer containing
+ // parameter sets(SPS + PPS) followed by a series of buffers, each for
+ // one input frame.
+      // TODO: handle output violating this assumption in bug 997110.
+ webrtc::EncodedImage encoded(output.Elements(), output.Length(),
+ output.Capacity());
+ encoded._frameType = (isParamSets || isIFrame) ?
+ webrtc::kKeyFrame : webrtc::kDeltaFrame;
+ EncodedFrame input_frame;
+ {
+ MonitorAutoLock lock(mMonitor);
+ // will sps/pps have the same timestamp as their iframe? Initial one on 8x10 has
+ // 0 timestamp.
+ if (isParamSets) {
+ // Let's assume it was the first item in the queue, but leave it there since an
+ // IDR will follow
+ input_frame = mInputFrames.front();
+ } else {
+ do {
+ if (mInputFrames.empty()) {
+ // Let's assume it was the last item in the queue, but leave it there
+ mInputFrames.push(input_frame);
+ CODEC_LOGE("OMX: encoded timestamp %u which doesn't match input queue!! (head %u)",
+ target_timestamp, input_frame.mTimestamp);
+ break;
+ }
+
+ input_frame = mInputFrames.front();
+ mInputFrames.pop();
+ if (input_frame.mTimestamp != target_timestamp) {
+ CODEC_LOGD("OMX: encoder skipped frame timestamp %u", input_frame.mTimestamp);
+ }
+ } while (input_frame.mTimestamp != target_timestamp);
+ }
+ }
+
+ encoded._encodedWidth = input_frame.mWidth;
+ encoded._encodedHeight = input_frame.mHeight;
+ encoded._timeStamp = input_frame.mTimestamp;
+ encoded.capture_time_ms_ = input_frame.mRenderTimeMs;
+ encoded._completeFrame = true;
+
+ CODEC_LOGD("Encoded frame: %d bytes, %dx%d, is_param %d, is_iframe %d, timestamp %u, captureTimeMs %" PRIu64,
+ encoded._length, encoded._encodedWidth, encoded._encodedHeight,
+ isParamSets, isIFrame, encoded._timeStamp, encoded.capture_time_ms_);
+ // Prepend SPS/PPS to I-frames unless they were sent last time.
+ SendEncodedDataToCallback(encoded, isIFrame && !mIsPrevFrameParamSets && !isParamSets);
+ // This will be true only for the frame following a paramset block! So if we're
+ // working with a correct encoder that generates SPS/PPS then iframe always, we
+ // won't try to insert. (also, don't set if we get SPS/PPS/iframe in one buffer)
+ mIsPrevFrameParamSets = isParamSets && !isIFrame;
+ if (isParamSets) {
+ // copy off the param sets for inserting later
+ mParamSets.Clear();
+ // since we may have SPS/PPS or SPS/PPS/iframe
+ size_t length = ParamSetLength(encoded._buffer, encoded._length);
+ MOZ_ASSERT(length > 0);
+ mParamSets.AppendElements(encoded._buffer, length);
+ }
+ }
+
+ return !isParamSets; // not really needed anymore
+ }
+
+private:
+  // Send encoded data to callback. The data will be broken into individual NALUs
+ // if necessary and sent to callback one by one. This function can also insert
+ // SPS/PPS NALUs in front of input data if requested.
+ void SendEncodedDataToCallback(webrtc::EncodedImage& aEncodedImage,
+ bool aPrependParamSets)
+ {
+ if (aPrependParamSets) {
+ webrtc::EncodedImage prepend(aEncodedImage);
+ // Insert current parameter sets in front of the input encoded data.
+ MOZ_ASSERT(mParamSets.Length() > sizeof(kNALStartCode)); // Start code + ...
+ prepend._length = mParamSets.Length();
+ prepend._buffer = mParamSets.Elements();
+ // Break into NALUs and send.
+ CODEC_LOGD("Prepending SPS/PPS: %d bytes, timestamp %u, captureTimeMs %" PRIu64,
+ prepend._length, prepend._timeStamp, prepend.capture_time_ms_);
+ SendEncodedDataToCallback(prepend, false);
+ }
+
+ struct nal_entry {
+ uint32_t offset;
+ uint32_t size;
+ };
+ AutoTArray<nal_entry, 1> nals;
+
+ // Break input encoded data into NALUs and send each one to callback.
+ const uint8_t* data = aEncodedImage._buffer;
+ size_t size = aEncodedImage._length;
+ const uint8_t* nalStart = nullptr;
+ size_t nalSize = 0;
+ while (getNextNALUnit(&data, &size, &nalStart, &nalSize, true) == OK) {
+ // XXX optimize by making buffer an offset
+ nal_entry nal = {((uint32_t) (nalStart - aEncodedImage._buffer)), (uint32_t) nalSize};
+ nals.AppendElement(nal);
+ }
+
+ size_t num_nals = nals.Length();
+ if (num_nals > 0) {
+ webrtc::RTPFragmentationHeader fragmentation;
+ fragmentation.VerifyAndAllocateFragmentationHeader(num_nals);
+ for (size_t i = 0; i < num_nals; i++) {
+ fragmentation.fragmentationOffset[i] = nals[i].offset;
+ fragmentation.fragmentationLength[i] = nals[i].size;
+ }
+ webrtc::EncodedImage unit(aEncodedImage);
+ unit._completeFrame = true;
+
+ mCallback->Encoded(unit, nullptr, &fragmentation);
+ }
+ }
+
+ OMXVideoEncoder* mOMX;
+ webrtc::EncodedImageCallback* mCallback;
+ bool mIsPrevFrameParamSets;
+ nsTArray<uint8_t> mParamSets;
+};
+
+// Encoder.
+WebrtcOMXH264VideoEncoder::WebrtcOMXH264VideoEncoder()
+ : mOMX(nullptr)
+ , mCallback(nullptr)
+ , mWidth(0)
+ , mHeight(0)
+ , mFrameRate(0)
+ , mBitRateKbps(0)
+#ifdef OMX_IDR_NEEDED_FOR_BITRATE
+ , mBitRateAtLastIDR(0)
+#endif
+ , mOMXConfigured(false)
+ , mOMXReconfigure(false)
+{
+ mReservation = new OMXCodecReservation(true);
+ CODEC_LOGD("WebrtcOMXH264VideoEncoder:%p constructed", this);
+}
+
+int32_t
+WebrtcOMXH264VideoEncoder::InitEncode(const webrtc::VideoCodec* aCodecSettings,
+ int32_t aNumOfCores,
+ size_t aMaxPayloadSize)
+{
+ CODEC_LOGD("WebrtcOMXH264VideoEncoder:%p init", this);
+
+ if (mOMX == nullptr) {
+ nsAutoPtr<OMXVideoEncoder> omx(OMXCodecWrapper::CreateAVCEncoder());
+ if (NS_WARN_IF(omx == nullptr)) {
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ mOMX = omx.forget();
+ CODEC_LOGD("WebrtcOMXH264VideoEncoder:%p OMX created", this);
+ }
+
+ if (!mReservation->ReserveOMXCodec()) {
+ CODEC_LOGD("WebrtcOMXH264VideoEncoder:%p Encoder in use", this);
+ mOMX = nullptr;
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ // Defer configuration until 1st frame is received because this function will
+ // be called more than once, and unfortunately with incorrect setting values
+ // at first.
+ mWidth = aCodecSettings->width;
+ mHeight = aCodecSettings->height;
+ mFrameRate = aCodecSettings->maxFramerate;
+ mBitRateKbps = aCodecSettings->startBitrate;
+ // XXX handle maxpayloadsize (aka mode 0/1)
+
+ CODEC_LOGD("WebrtcOMXH264VideoEncoder:%p OMX Encoder reserved", this);
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t
+WebrtcOMXH264VideoEncoder::Encode(const webrtc::I420VideoFrame& aInputImage,
+ const webrtc::CodecSpecificInfo* aCodecSpecificInfo,
+ const std::vector<webrtc::VideoFrameType>* aFrameTypes)
+{
+ MOZ_ASSERT(mOMX != nullptr);
+ if (mOMX == nullptr) {
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ // Have to reconfigure for resolution or framerate changes :-(
+ // ~220ms initial configure on 8x10, 50-100ms for re-configure it appears
+ // XXX drop frames while this is happening?
+ if (aInputImage.width() < 0 || (uint32_t)aInputImage.width() != mWidth ||
+ aInputImage.height() < 0 || (uint32_t)aInputImage.height() != mHeight) {
+ mWidth = aInputImage.width();
+ mHeight = aInputImage.height();
+ mOMXReconfigure = true;
+ }
+
+ if (!mOMXConfigured || mOMXReconfigure) {
+ if (mOMXConfigured) {
+ CODEC_LOGD("WebrtcOMXH264VideoEncoder:%p reconfiguring encoder %dx%d @ %u fps",
+ this, mWidth, mHeight, mFrameRate);
+ mOMXConfigured = false;
+ }
+ mOMXReconfigure = false;
+ // XXX This can take time. Encode() likely assumes encodes are queued "quickly" and
+ // don't block the input too long. Frames may build up.
+
+ // XXX take from negotiated SDP in codecSpecific data
+ OMX_VIDEO_AVCLEVELTYPE level = OMX_VIDEO_AVCLevel3;
+ // OMX_Video_ControlRateConstant is not supported on QC 8x10
+ OMX_VIDEO_CONTROLRATETYPE bitrateMode = OMX_Video_ControlRateConstantSkipFrames;
+
+ // Set up configuration parameters for AVC/H.264 encoder.
+ sp<AMessage> format = new AMessage;
+ // Fixed values
+ format->setString("mime", MEDIA_MIMETYPE_VIDEO_AVC);
+ // XXX We should only set to < infinity if we're not using any recovery RTCP options
+ // However, we MUST set it to a lower value because the 8x10 rate controller
+ // only changes rate at GOP boundaries.... but it also changes rate on requested GOPs
+
+ // Too long and we have very low bitrates for the first second or two... plus
+ // bug 1014921 means we have to force them every ~3 seconds or less.
+ format->setInt32("i-frame-interval", 4 /* seconds */);
+ // See mozilla::layers::GrallocImage, supports YUV 4:2:0, CbCr width and
+ // height is half that of Y
+ format->setInt32("color-format", OMX_COLOR_FormatYUV420SemiPlanar);
+ format->setInt32("profile", OMX_VIDEO_AVCProfileBaseline);
+ format->setInt32("level", level);
+ format->setInt32("bitrate-mode", bitrateMode);
+ format->setInt32("store-metadata-in-buffers", 0);
+ // XXX Unfortunately, 8x10 doesn't support this, but ask anyways
+ format->setInt32("prepend-sps-pps-to-idr-frames", 1);
+ // Input values.
+ format->setInt32("width", mWidth);
+ format->setInt32("height", mHeight);
+ format->setInt32("stride", mWidth);
+ format->setInt32("slice-height", mHeight);
+ format->setInt32("frame-rate", mFrameRate);
+ format->setInt32("bitrate", mBitRateKbps*1000);
+
+ CODEC_LOGD("WebrtcOMXH264VideoEncoder:%p configuring encoder %dx%d @ %d fps, rate %d kbps",
+ this, mWidth, mHeight, mFrameRate, mBitRateKbps);
+ nsresult rv = mOMX->ConfigureDirect(format,
+ OMXVideoEncoder::BlobFormat::AVC_NAL);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ CODEC_LOGE("WebrtcOMXH264VideoEncoder:%p FAILED configuring encoder %d", this, int(rv));
+ return WEBRTC_VIDEO_CODEC_ERROR;
+ }
+ mOMXConfigured = true;
+#ifdef OMX_IDR_NEEDED_FOR_BITRATE
+ mLastIDRTime = TimeStamp::Now();
+ mBitRateAtLastIDR = mBitRateKbps;
+#endif
+ }
+
+ if (aFrameTypes && aFrameTypes->size() &&
+ ((*aFrameTypes)[0] == webrtc::kKeyFrame)) {
+ mOMX->RequestIDRFrame();
+#ifdef OMX_IDR_NEEDED_FOR_BITRATE
+ mLastIDRTime = TimeStamp::Now();
+ mBitRateAtLastIDR = mBitRateKbps;
+ } else if (mBitRateKbps != mBitRateAtLastIDR) {
+ // 8x10 OMX codec requires a keyframe to shift bitrates!
+ TimeStamp now = TimeStamp::Now();
+ if (mLastIDRTime.IsNull()) {
+ // paranoia
+ mLastIDRTime = now;
+ }
+ int32_t timeSinceLastIDR = (now - mLastIDRTime).ToMilliseconds();
+
+ // Balance asking for IDRs too often against direction and amount of bitrate change.
+
+ // HACK for bug 1014921: 8x10 has encode/decode mismatches that build up errors
+ // if you go too long without an IDR. In normal use, bitrate will change often
+ // enough to never hit this time limit.
+ if ((timeSinceLastIDR > 3000) ||
+ (mBitRateKbps < (mBitRateAtLastIDR * 8)/10) ||
+ (timeSinceLastIDR < 300 && mBitRateKbps < (mBitRateAtLastIDR * 9)/10) ||
+ (timeSinceLastIDR < 1000 && mBitRateKbps < (mBitRateAtLastIDR * 97)/100) ||
+ (timeSinceLastIDR >= 1000 && mBitRateKbps < mBitRateAtLastIDR) ||
+ (mBitRateKbps > (mBitRateAtLastIDR * 15)/10) ||
+ (timeSinceLastIDR < 500 && mBitRateKbps > (mBitRateAtLastIDR * 13)/10) ||
+ (timeSinceLastIDR < 1000 && mBitRateKbps > (mBitRateAtLastIDR * 11)/10) ||
+ (timeSinceLastIDR >= 1000 && mBitRateKbps > mBitRateAtLastIDR)) {
+ CODEC_LOGD("Requesting IDR for bitrate change from %u to %u (time since last idr %dms)",
+ mBitRateAtLastIDR, mBitRateKbps, timeSinceLastIDR);
+
+ mOMX->RequestIDRFrame();
+ mLastIDRTime = now;
+ mBitRateAtLastIDR = mBitRateKbps;
+ }
+#endif
+ }
+
+ // Wrap I420VideoFrame input with PlanarYCbCrImage for OMXVideoEncoder.
+ layers::PlanarYCbCrData yuvData;
+ yuvData.mYChannel = const_cast<uint8_t*>(aInputImage.buffer(webrtc::kYPlane));
+ yuvData.mYSize = gfx::IntSize(aInputImage.width(), aInputImage.height());
+ yuvData.mYStride = aInputImage.stride(webrtc::kYPlane);
+ MOZ_ASSERT(aInputImage.stride(webrtc::kUPlane) == aInputImage.stride(webrtc::kVPlane));
+ yuvData.mCbCrStride = aInputImage.stride(webrtc::kUPlane);
+ yuvData.mCbChannel = const_cast<uint8_t*>(aInputImage.buffer(webrtc::kUPlane));
+ yuvData.mCrChannel = const_cast<uint8_t*>(aInputImage.buffer(webrtc::kVPlane));
+ yuvData.mCbCrSize = gfx::IntSize((yuvData.mYSize.width + 1) / 2,
+ (yuvData.mYSize.height + 1) / 2);
+ yuvData.mPicSize = yuvData.mYSize;
+ yuvData.mStereoMode = StereoMode::MONO;
+ layers::RecyclingPlanarYCbCrImage img(nullptr);
+ // AdoptData() doesn't need AllocateAndGetNewBuffer(); OMXVideoEncoder is ok with this
+ img.AdoptData(yuvData);
+
+ CODEC_LOGD("Encode frame: %dx%d, timestamp %u (%lld), renderTimeMs %" PRIu64,
+ aInputImage.width(), aInputImage.height(),
+ aInputImage.timestamp(), aInputImage.timestamp() * 1000ll / 90,
+ aInputImage.render_time_ms());
+
+ nsresult rv = mOMX->Encode(&img,
+ yuvData.mYSize.width,
+ yuvData.mYSize.height,
+ aInputImage.timestamp() * 1000ll / 90, // 90kHz -> us.
+ 0);
+ if (rv == NS_OK) {
+ if (mOutputDrain == nullptr) {
+ mOutputDrain = new EncOutputDrain(mOMX, mCallback);
+ mOutputDrain->Start();
+ }
+ EncodedFrame frame;
+ frame.mWidth = mWidth;
+ frame.mHeight = mHeight;
+ frame.mTimestamp = aInputImage.timestamp();
+ frame.mRenderTimeMs = aInputImage.render_time_ms();
+ mOutputDrain->QueueInput(frame);
+ }
+
+ return (rv == NS_OK) ? WEBRTC_VIDEO_CODEC_OK : WEBRTC_VIDEO_CODEC_ERROR;
+}
+
+// Store the sink that will receive encoded frames drained from the OMX
+// encoder (invoked later by EncOutputDrain). Pointer is non-owning.
+int32_t
+WebrtcOMXH264VideoEncoder::RegisterEncodeCompleteCallback(
+    webrtc::EncodedImageCallback* aCallback)
+{
+  CODEC_LOGD("WebrtcOMXH264VideoEncoder:%p set callback:%p", this, aCallback);
+  MOZ_ASSERT(aCallback);
+  mCallback = aCallback;
+
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// Tear down the encoder: stop the output-drain thread, drop the OMX encoder
+// and give back the hardware-codec reservation so another client can use it.
+// Safe to call more than once (the destructor also calls it).
+int32_t
+WebrtcOMXH264VideoEncoder::Release()
+{
+  CODEC_LOGD("WebrtcOMXH264VideoEncoder:%p will be released", this);
+
+  if (mOutputDrain != nullptr) {
+    mOutputDrain->Stop();
+    mOutputDrain = nullptr;
+  }
+  mOMXConfigured = false;
+  // Remember whether we actually held a codec before clearing the pointer, so
+  // the reservation is only released when one was in use.
+  bool hadOMX = !!mOMX;
+  mOMX = nullptr;
+  if (hadOMX) {
+    mReservation->ReleaseOMXCodec();
+  }
+  CODEC_LOGD("WebrtcOMXH264VideoEncoder:%p released", this);
+
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// Destructor delegates all cleanup to Release().
+WebrtcOMXH264VideoEncoder::~WebrtcOMXH264VideoEncoder()
+{
+  CODEC_LOGD("WebrtcOMXH264VideoEncoder:%p will be destructed", this);
+
+  Release();
+}
+
+// Inform the encoder of the new packet loss rate and the round-trip time of
+// the network. aPacketLossRate is fraction lost and can be 0~255
+// (255 means 100% lost).
+// Note: stagefright doesn't handle these parameters.
+int32_t
+WebrtcOMXH264VideoEncoder::SetChannelParameters(uint32_t aPacketLossRate,
+                                                int64_t aRoundTripTimeMs)
+{
+  // Intentionally a no-op (see note above): the values are only logged.
+  CODEC_LOGD("WebrtcOMXH264VideoEncoder:%p set channel packet loss:%u, rtt:%" PRIi64,
+             this, aPacketLossRate, aRoundTripTimeMs);
+
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// TODO: Bug 997567. Find the way to support frame rate change.
+// Apply a new target bitrate (kbps) and framerate from bandwidth adaptation.
+// Framerate is quantized into steps (30/20/15/10 fps) and only a step change
+// forces an OMX re-configure on the next Encode(); bitrate is capped at
+// 700 kbps (global single setting, ~HVGA@30fps) and applied immediately.
+int32_t
+WebrtcOMXH264VideoEncoder::SetRates(uint32_t aBitRateKbps, uint32_t aFrameRate)
+{
+  CODEC_LOGE("WebrtcOMXH264VideoEncoder:%p set bitrate:%u, frame rate:%u (%u))",
+             this, aBitRateKbps, aFrameRate, mFrameRate);
+  MOZ_ASSERT(mOMX != nullptr);
+  if (mOMX == nullptr) {
+    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+  }
+
+  // XXX Should use StageFright framerate change, perhaps only on major changes of framerate.
+
+  // Without Stagefright support, Algorithm should be:
+  // if (frameRate < 50% of configured) {
+  //   drop framerate to next step down that includes current framerate within 50%
+  // } else if (frameRate > configured) {
+  //   change config to next step up that includes current framerate
+  // }
+#if !defined(TEST_OMX_FRAMERATE_CHANGES)
+  if (aFrameRate > mFrameRate ||
+      aFrameRate < mFrameRate/2) {
+    uint32_t old_rate = mFrameRate;
+    if (aFrameRate >= 15) {
+      mFrameRate = 30;
+    } else if (aFrameRate >= 10) {
+      mFrameRate = 20;
+    } else if (aFrameRate >= 8) {
+      mFrameRate = 15;
+    } else /* if (aFrameRate >= 5)*/ {
+      // don't go lower; encoder may not be stable
+      mFrameRate = 10;
+    }
+    if (mFrameRate < aFrameRate) { // safety
+      mFrameRate = aFrameRate;
+    }
+    if (old_rate != mFrameRate) {
+      mOMXReconfigure = true;  // force re-configure on next frame
+    }
+  }
+#else
+  // XXX for testing, be wild!
+  if (aFrameRate != mFrameRate) {
+    mFrameRate = aFrameRate;
+    mOMXReconfigure = true; // force re-configure on next frame
+  }
+#endif
+
+  // XXX Limit bitrate for 8x10 devices to a specific level depending on fps and resolution
+  // mBitRateKbps = LimitBitrate8x10(mWidth, mHeight, mFrameRate, aBitRateKbps);
+  // Rely on global single setting (~720 kbps for HVGA@30fps) for now
+  if (aBitRateKbps > 700) {
+    aBitRateKbps = 700;
+  }
+  mBitRateKbps = aBitRateKbps;
+  nsresult rv = mOMX->SetBitrate(mBitRateKbps);
+  NS_WARNING_ASSERTION(NS_SUCCEEDED(rv), "SetBitrate failed");
+  // Bug fix: the result mapping was inverted -- a failed SetBitrate returned
+  // WEBRTC_VIDEO_CODEC_OK and success returned ERROR. Report OK on success.
+  return NS_FAILED(rv) ? WEBRTC_VIDEO_CODEC_ERROR : WEBRTC_VIDEO_CODEC_OK;
+}
+
+// Decoder.
+// Construct in an unconfigured state; the OMX decoder itself is created
+// lazily in Decode() once SPS dimensions are known.
+WebrtcOMXH264VideoDecoder::WebrtcOMXH264VideoDecoder()
+  : mCallback(nullptr)
+  , mOMX(nullptr)
+{
+  mReservation = new OMXCodecReservation(false);
+  CODEC_LOGD("WebrtcOMXH264VideoDecoder:%p will be constructed", this);
+}
+
+// Reserve the hardware decoder. Actual OMX configuration is deferred to
+// Decode(), where the SPS supplies the real width/height.
+int32_t
+WebrtcOMXH264VideoDecoder::InitDecode(const webrtc::VideoCodec* aCodecSettings,
+                                      int32_t aNumOfCores)
+{
+  CODEC_LOGD("WebrtcOMXH264VideoDecoder:%p init OMX:%p", this, mOMX.get());
+
+  // Fail if another client already holds the hardware decoder.
+  if (!mReservation->ReserveOMXCodec()) {
+    CODEC_LOGD("WebrtcOMXH264VideoDecoder:%p Decoder in use", this);
+    return WEBRTC_VIDEO_CODEC_ERROR;
+  }
+
+  // Defer configuration until SPS/PPS NALUs (where actual decoder config
+  // values can be extracted) are received.
+
+  CODEC_LOGD("WebrtcOMXH264VideoDecoder:%p OMX Decoder reserved", this);
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// Decode one H.264 access unit. The OMX decoder is created and configured
+// lazily from the first input containing an SPS (for width/height); decoded
+// frames are delivered asynchronously through mCallback.
+int32_t
+WebrtcOMXH264VideoDecoder::Decode(const webrtc::EncodedImage& aInputImage,
+                                  bool aMissingFrames,
+                                  const webrtc::RTPFragmentationHeader* aFragmentation,
+                                  const webrtc::CodecSpecificInfo* aCodecSpecificInfo,
+                                  int64_t aRenderTimeMs)
+{
+  if (aInputImage._length== 0 || !aInputImage._buffer) {
+    return WEBRTC_VIDEO_CODEC_ERROR;
+  }
+
+  bool configured = !!mOMX;
+  if (!configured) {
+    // Search for SPS NALU in input to get width/height config.
+    int32_t width;
+    int32_t height;
+    status_t result = WebrtcOMXDecoder::ExtractPicDimensions(aInputImage._buffer,
+                                                             aInputImage._length,
+                                                             &width, &height);
+    if (result != OK) {
+      // Cannot config decoder because SPS haven't been seen.
+      CODEC_LOGI("WebrtcOMXH264VideoDecoder:%p missing SPS in input (nal 0x%02x, len %d)",
+                 this, aInputImage._buffer[sizeof(kNALStartCode)] & 0x1f, aInputImage._length);
+      return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+    }
+    RefPtr<WebrtcOMXDecoder> omx = new WebrtcOMXDecoder(MEDIA_MIMETYPE_VIDEO_AVC,
+                                                        mCallback);
+    result = omx->ConfigureWithPicDimensions(width, height);
+    if (NS_WARN_IF(result != OK)) {
+      return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+    }
+    CODEC_LOGD("WebrtcOMXH264VideoDecoder:%p start OMX", this);
+    mOMX = omx;
+  }
+
+  bool feedFrame = true;
+  while (feedFrame) {
+    // Retry while the decoder has no free input buffer. The second argument
+    // marks the first frame after configuration -- presumably so codec-config
+    // data is handled specially; confirm against WebrtcOMXDecoder::FillInput.
+    status_t err = mOMX->FillInput(aInputImage, !configured, aRenderTimeMs);
+    feedFrame = (err == -EAGAIN); // No input buffer available. Try again.
+  }
+
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// Store the sink that will receive decoded frames. Pointer is non-owning.
+int32_t
+WebrtcOMXH264VideoDecoder::RegisterDecodeCompleteCallback(webrtc::DecodedImageCallback* aCallback)
+{
+  CODEC_LOGD("WebrtcOMXH264VideoDecoder:%p set callback:%p", this, aCallback);
+  MOZ_ASSERT(aCallback);
+  mCallback = aCallback;
+
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// Drop the OMX decoder and give back the hardware reservation.
+int32_t
+WebrtcOMXH264VideoDecoder::Release()
+{
+  CODEC_LOGD("WebrtcOMXH264VideoDecoder:%p will be released", this);
+
+  mOMX = nullptr; // calls Stop()
+  mReservation->ReleaseOMXCodec();
+
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// Destructor delegates all cleanup to Release().
+WebrtcOMXH264VideoDecoder::~WebrtcOMXH264VideoDecoder()
+{
+  CODEC_LOGD("WebrtcOMXH264VideoDecoder:%p will be destructed", this);
+  Release();
+}
+
+// Intentionally a no-op: the underlying OMX decoder is not reset, only a
+// warning is logged. Returns OK so callers proceed normally.
+int32_t
+WebrtcOMXH264VideoDecoder::Reset()
+{
+  CODEC_LOGW("WebrtcOMXH264VideoDecoder::Reset() will NOT reset decoder");
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+}
diff --git a/media/webrtc/signaling/src/media-conduit/WebrtcOMXH264VideoCodec.h b/media/webrtc/signaling/src/media-conduit/WebrtcOMXH264VideoCodec.h
new file mode 100644
index 000000000..71cf5c681
--- /dev/null
+++ b/media/webrtc/signaling/src/media-conduit/WebrtcOMXH264VideoCodec.h
@@ -0,0 +1,108 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef WEBRTC_GONK
+// NOTE(review): "#pragma error" is not a standard directive and most
+// compilers silently ignore unknown pragmas, so this guard likely has no
+// effect; "#error" would actually enforce the B2G-only restriction --
+// confirm build impact before changing.
+#pragma error WebrtcOMXH264VideoCodec works only on B2G.
+#endif
+
+#ifndef WEBRTC_OMX_H264_CODEC_H_
+#define WEBRTC_OMX_H264_CODEC_H_
+
+#include "AudioConduit.h"
+#include "VideoConduit.h"
+#include <foundation/ABase.h>
+#include <utils/RefBase.h>
+#include "OMXCodecWrapper.h"
+
+namespace android {
+  class OMXVideoEncoder;
+}
+
+namespace mozilla {
+
+class WebrtcOMXDecoder;
+class OMXOutputDrain;
+
+// XXX see if we can reduce this
+#define WEBRTC_OMX_H264_MIN_DECODE_BUFFERS 10
+// Tested only with #ifdef, so the value (0) is irrelevant; defining it at all
+// enables the IDR-on-bitrate-change workaround used by the encoder.
+#define OMX_IDR_NEEDED_FOR_BITRATE 0
+
+// Hardware-accelerated H.264 encoder backed by a stagefright OMXVideoEncoder;
+// encoded output is drained on a separate OMXOutputDrain thread.
+class WebrtcOMXH264VideoEncoder : public WebrtcVideoEncoder
+{
+public:
+  WebrtcOMXH264VideoEncoder();
+
+  virtual ~WebrtcOMXH264VideoEncoder();
+
+  // Implement VideoEncoder interface.
+  virtual uint64_t PluginID() const override { return 0; }
+
+  virtual int32_t InitEncode(const webrtc::VideoCodec* aCodecSettings,
+                             int32_t aNumOfCores,
+                             size_t aMaxPayloadSize) override;
+
+  virtual int32_t Encode(const webrtc::I420VideoFrame& aInputImage,
+                         const webrtc::CodecSpecificInfo* aCodecSpecificInfo,
+                         const std::vector<webrtc::VideoFrameType>* aFrameTypes) override;
+
+  virtual int32_t RegisterEncodeCompleteCallback(webrtc::EncodedImageCallback* aCallback) override;
+
+  virtual int32_t Release() override;
+
+  virtual int32_t SetChannelParameters(uint32_t aPacketLossRate,
+                                       int64_t aRoundTripTimeMs) override;
+
+  virtual int32_t SetRates(uint32_t aBitRate, uint32_t aFrameRate) override;
+
+private:
+  nsAutoPtr<android::OMXVideoEncoder> mOMX;  // owned OMX encoder instance
+  android::sp<android::OMXCodecReservation> mReservation;  // hw codec reservation
+
+  webrtc::EncodedImageCallback* mCallback;  // non-owning sink for encoded frames
+  RefPtr<OMXOutputDrain> mOutputDrain;  // thread draining encoder output
+  uint32_t mWidth;
+  uint32_t mHeight;
+  uint32_t mFrameRate;
+  uint32_t mBitRateKbps;
+#ifdef OMX_IDR_NEEDED_FOR_BITRATE
+  // State for the 8x10 workaround: an IDR must be requested on bitrate change.
+  uint32_t mBitRateAtLastIDR;
+  TimeStamp mLastIDRTime;
+#endif
+  bool mOMXConfigured;   // true once the encoder has been configured
+  bool mOMXReconfigure;  // set to force re-configure on the next Encode()
+  webrtc::EncodedImage mEncodedImage;
+};
+
+// Hardware-accelerated H.264 decoder; the underlying WebrtcOMXDecoder is
+// created lazily in Decode() once SPS dimensions are available.
+class WebrtcOMXH264VideoDecoder : public WebrtcVideoDecoder
+{
+public:
+  WebrtcOMXH264VideoDecoder();
+
+  virtual ~WebrtcOMXH264VideoDecoder();
+
+  // Implement VideoDecoder interface.
+  virtual uint64_t PluginID() const override { return 0; }
+
+  virtual int32_t InitDecode(const webrtc::VideoCodec* aCodecSettings,
+                             int32_t aNumOfCores) override;
+  virtual int32_t Decode(const webrtc::EncodedImage& aInputImage,
+                         bool aMissingFrames,
+                         const webrtc::RTPFragmentationHeader* aFragmentation,
+                         const webrtc::CodecSpecificInfo* aCodecSpecificInfo = nullptr,
+                         int64_t aRenderTimeMs = -1) override;
+  virtual int32_t RegisterDecodeCompleteCallback(webrtc::DecodedImageCallback* callback) override;
+
+  virtual int32_t Release() override;
+
+  virtual int32_t Reset() override;
+
+private:
+  webrtc::DecodedImageCallback* mCallback;  // non-owning sink for decoded frames
+  RefPtr<WebrtcOMXDecoder> mOMX;  // null until first SPS is seen
+  android::sp<android::OMXCodecReservation> mReservation;  // hw codec reservation
+};
+
+}
+
+#endif // WEBRTC_OMX_H264_CODEC_H_
diff --git a/media/webrtc/signaling/src/media/CSFAudioControlWrapper.cpp b/media/webrtc/signaling/src/media/CSFAudioControlWrapper.cpp
new file mode 100644
index 000000000..87ca396b7
--- /dev/null
+++ b/media/webrtc/signaling/src/media/CSFAudioControlWrapper.cpp
@@ -0,0 +1,149 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "CSFLog.h"
+#include "CSFAudioControlWrapper.h"
+
+static const char* logTag = "VcmSipccBinding";
+
+namespace CSF {
+
+  std::vector<std::string> AudioControlWrapper::getRecordingDevices()
+  {
+    // Guard: the wrapped control may already have expired.
+    if (_realAudioControl == nullptr)
+    {
+      CSFLogWarn( logTag, "Attempt to getRecordingDevices for expired audio control");
+      std::vector<std::string> vec;
+      return vec;
+    }
+    return _realAudioControl->getRecordingDevices();
+  }
+
+  std::vector<std::string> AudioControlWrapper::getPlayoutDevices()
+  {
+    // Guard: the wrapped control may already have expired.
+    if (_realAudioControl == nullptr)
+    {
+      CSFLogWarn( logTag, "Attempt to getPlayoutDevices for expired audio control");
+      std::vector<std::string> vec;
+      return vec;
+    }
+    return _realAudioControl->getPlayoutDevices();
+  }
+
+  std::string AudioControlWrapper::getRecordingDevice()
+  {
+    // Guard: the wrapped control may already have expired.
+    if (_realAudioControl == nullptr)
+    {
+      CSFLogWarn( logTag, "Attempt to getRecordingDevice for expired audio control");
+      return "";
+    }
+    return _realAudioControl->getRecordingDevice();
+  }
+
+  std::string AudioControlWrapper::getPlayoutDevice()
+  {
+    // Guard: the wrapped control may already have expired.
+    if (_realAudioControl == nullptr)
+    {
+      CSFLogWarn( logTag, "Attempt to getPlayoutDevice for expired audio control");
+      return "";
+    }
+    return _realAudioControl->getPlayoutDevice();
+  }
+
+  bool AudioControlWrapper::setRecordingDevice( const std::string& name )
+  {
+    // Guard: the wrapped control may already have expired.
+    if (_realAudioControl == nullptr)
+    {
+      CSFLogWarn( logTag, "Attempt to setRecordingDevice to %s for expired audio control",
+                  name.c_str());
+      return false;
+    }
+    return _realAudioControl->setRecordingDevice(name);
+  }
+
+  bool AudioControlWrapper::setPlayoutDevice( const std::string& name )
+  {
+    // Guard: the wrapped control may already have expired.
+    if (_realAudioControl == nullptr)
+    {
+      CSFLogWarn( logTag, "Attempt to setPlayoutDevice to %s for expired audio control",
+                  name.c_str());
+      return false;
+    }
+    return _realAudioControl->setPlayoutDevice(name);
+  }
+
+  bool AudioControlWrapper::setDefaultVolume( int volume )
+  {
+    // Guard: the wrapped control may already have expired.
+    if (_realAudioControl == nullptr)
+    {
+      CSFLogWarn( logTag, "Attempt to setDefaultVolume for expired audio control");
+      return false;
+    }
+    return _realAudioControl->setDefaultVolume(volume);
+  }
+
+  int AudioControlWrapper::getDefaultVolume()
+  {
+    // Guard: the wrapped control may already have expired.
+    if (_realAudioControl == nullptr)
+    {
+      CSFLogWarn( logTag, "Attempt to getDefaultVolume for expired audio control");
+      return -1;
+    }
+    return _realAudioControl->getDefaultVolume();
+  }
+
+  bool AudioControlWrapper::setRingerVolume( int volume )
+  {
+    // Guard: the wrapped control may already have expired.
+    if (_realAudioControl == nullptr)
+    {
+      CSFLogWarn( logTag, "Attempt to setRingerVolume for expired audio control");
+      return false;
+    }
+    return _realAudioControl->setRingerVolume(volume);
+  }
+
+  int AudioControlWrapper::getRingerVolume()
+  {
+    // Guard: the wrapped control may already have expired.
+    if (_realAudioControl == nullptr)
+    {
+      CSFLogWarn( logTag, "Attempt to getRingerVolume for expired audio control");
+      return -1;
+    }
+    return _realAudioControl->getRingerVolume();
+  }
+
+  // Nothing to release explicitly: _realAudioControl is a RefPtr and is
+  // dropped automatically.
+  AudioControlWrapper::~AudioControlWrapper()
+  {
+  }
+}
diff --git a/media/webrtc/signaling/src/media/CSFAudioControlWrapper.h b/media/webrtc/signaling/src/media/CSFAudioControlWrapper.h
new file mode 100644
index 000000000..0faccbd0d
--- /dev/null
+++ b/media/webrtc/signaling/src/media/CSFAudioControlWrapper.h
@@ -0,0 +1,42 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#pragma once
+
+#include "mozilla/RefPtr.h"
+#include "CC_Common.h"
+#include "CSFAudioControl.h"
+
+namespace CSF
+{
+  DECLARE_NS_PTR(AudioControlWrapper)
+  // Forwarding proxy around a (possibly expired) AudioControl: each call is
+  // delegated when the wrapped control is alive, otherwise a warning is
+  // logged and a neutral value returned (see CSFAudioControlWrapper.cpp).
+  class ECC_API AudioControlWrapper : public AudioControl
+  {
+  public:
+    // device names are in UTF-8 encoding
+
+    explicit AudioControlWrapper(AudioControl * audioControl){_realAudioControl = audioControl;};
+    virtual std::vector<std::string> getRecordingDevices();
+    virtual std::vector<std::string> getPlayoutDevices();
+
+    virtual std::string getRecordingDevice();
+    virtual std::string getPlayoutDevice();
+
+    virtual bool setRecordingDevice( const std::string& name );
+    virtual bool setPlayoutDevice( const std::string& name );
+
+    virtual bool setDefaultVolume( int volume );
+    virtual int getDefaultVolume();
+
+    virtual bool setRingerVolume( int volume );
+    virtual int getRingerVolume();
+
+    // Re-point the proxy at a (new) real control; may be null to expire it.
+    virtual void setAudioControl(AudioControl * audioControl){_realAudioControl = audioControl;};
+
+  private:
+    // Destructor is private; lifetime is managed via ref-counting.
+    virtual ~AudioControlWrapper();
+
+    RefPtr<AudioControl> _realAudioControl;
+  };
+};
diff --git a/media/webrtc/signaling/src/media/CSFAudioTermination.h b/media/webrtc/signaling/src/media/CSFAudioTermination.h
new file mode 100644
index 000000000..25bc0c095
--- /dev/null
+++ b/media/webrtc/signaling/src/media/CSFAudioTermination.h
@@ -0,0 +1,117 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef CSFAUDIOMEDIATERMINATION_H_
+#define CSFAUDIOMEDIATERMINATION_H_
+
+#include <CSFMediaTermination.h>
+
+typedef enum
+{
+ AudioCodecMask_G711 = 1,
+ AudioCodecMask_LINEAR = 2,
+ AudioCodecMask_G722 = 4,
+ AudioCodecMask_iLBC = 16,
+ AudioCodecMask_iSAC = 32
+
+} AudioCodecMask;
+
+typedef enum
+{
+ RingMode_INSIDE_RING,
+ RingMode_OUTSIDE_RING,
+ RingMode_FEATURE_RING,
+ RingMode_BELLCORE_DR1,
+ RingMode_BELLCORE_DR2,
+ RingMode_BELLCORE_DR3,
+ RingMode_BELLCORE_DR4,
+ RingMode_BELLCORE_DR5,
+ RingMode_FLASHONLY_RING,
+ RingMode_PRECEDENCE_RING
+
+} RingMode;
+
+typedef enum
+{
+ ToneType_INSIDE_DIAL_TONE,
+ ToneType_OUTSIDE_DIAL_TONE,
+ ToneType_LINE_BUSY_TONE,
+ ToneType_ALERTING_TONE,
+ ToneType_BUSY_VERIFY_TONE,
+ ToneType_STUTTER_TONE,
+ ToneType_MSG_WAITING_TONE,
+ ToneType_REORDER_TONE,
+ ToneType_CALL_WAITING_TONE,
+ ToneType_CALL_WAITING_2_TONE,
+ ToneType_CALL_WAITING_3_TONE,
+ ToneType_CALL_WAITING_4_TONE,
+ ToneType_HOLD_TONE,
+ ToneType_CONFIRMATION_TONE,
+ ToneType_PERMANENT_SIGNAL_TONE,
+ ToneType_REMINDER_RING_TONE,
+ ToneType_NO_TONE,
+ ToneType_ZIP_ZIP,
+ ToneType_ZIP,
+ ToneType_BEEP_BONK,
+ ToneType_RECORDERWARNING_TONE,
+ ToneType_RECORDERDETECTED_TONE,
+ ToneType_MONITORWARNING_TONE,
+ ToneType_SECUREWARNING_TONE
+
+} ToneType;
+
+typedef enum
+{
+ ToneDirection_PLAY_TONE_TO_EAR = 1,
+ ToneDirection_PLAY_TONE_TO_NET = 2,
+ ToneDirection_PLAY_TONE_TO_ALL = 3
+
+} ToneDirection;
+
+typedef enum
+{
+ AudioPayloadType_G711ALAW64K = 2,
+ AudioPayloadType_G711ALAW56K = 3,
+ AudioPayloadType_G711ULAW64K = 4,
+ AudioPayloadType_G711ULAW56K = 5,
+ AudioPayloadType_G722_64K = 6,
+ AudioPayloadType_G722_56K = 7,
+ AudioPayloadType_G722_48K = 8,
+ AudioPayloadType_RFC2833 = 38,
+ AudioPayloadType_ILBC20 = 39,
+ AudioPayloadType_ILBC30 = 40,
+ AudioPayloadType_ISAC = 41,
+ AudioPayloadType_OPUS = 109
+
+} AudioPayloadType;
+
+#if __cplusplus
+
+namespace CSF
+{
+  //AudioTermination adds to the core MediaTermination
+  // Abstract interface for audio-path control: tones, ringing, DTMF, mute and
+  // volume on a per-stream basis.
+  // NOTE(review): no virtual destructor is declared; deleting an
+  // implementation through an AudioTermination* would be undefined behavior
+  // -- confirm how implementations are owned before relying on that.
+  class AudioTermination : public MediaTermination
+  {
+  public:
+    virtual int toneStart ( ToneType type, ToneDirection direction, int alertInfo, int groupId, int streamId, bool useBackup ) = 0;
+    virtual int toneStop ( ToneType type, int groupId, int streamId ) = 0;
+    virtual int ringStart ( int lineId, RingMode mode, bool once ) = 0;
+    virtual int ringStop ( int lineId ) = 0;
+
+    virtual int sendDtmf ( int streamId, int digit ) = 0;
+    virtual bool mute ( int streamId, bool mute ) = 0;
+    virtual bool isMuted ( int streamId ) = 0;
+
+    virtual bool setVolume ( int streamId, int volume ) = 0;
+    virtual int getVolume ( int streamId ) = 0;
+
+    virtual void setVADEnabled (bool vadEnabled) = 0;
+
+  };
+
+} // namespace
+
+#endif // __cplusplus
+
+#endif /* CSFAUDIOMEDIATERMINATION_H_ */
diff --git a/media/webrtc/signaling/src/media/CSFMediaProvider.h b/media/webrtc/signaling/src/media/CSFMediaProvider.h
new file mode 100644
index 000000000..d61f8c067
--- /dev/null
+++ b/media/webrtc/signaling/src/media/CSFMediaProvider.h
@@ -0,0 +1,54 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef CSFMEDIAPROVIDER_H_
+#define CSFMEDIAPROVIDER_H_
+
+#if __cplusplus
+
+#include <string>
+
+namespace CSF
+{
+ class AudioControl;
+ class VideoControl;
+ class AudioTermination;
+ class VideoTermination;
+ class MediaProviderObserver;
+
+
+  // Abstract factory/facade handing out the platform's audio/video control
+  // and termination services. Concrete instances come from create().
+  class MediaProvider
+  {
+  public:
+    static MediaProvider* create( );///factory method for all MediaProvider derived types (ctor is protected).
+    virtual ~MediaProvider() = 0;
+
+    virtual int init() = 0;
+    virtual void shutdown() = 0;
+
+    virtual AudioControl* getAudioControl() = 0;
+    virtual VideoControl* getVideoControl() = 0;
+    virtual AudioTermination* getAudioTermination() = 0;
+    virtual VideoTermination* getVideoTermination() = 0;
+
+    virtual void addMediaProviderObserver( MediaProviderObserver* observer ) = 0;
+
+  protected:
+    MediaProvider() {};
+  };
+
+  // Observer for media-provider events; all callbacks default to no-ops so
+  // subclasses only override what they need.
+  class MediaProviderObserver
+  {
+  public:
+    // Bug fix: a polymorphic base registered via pointer (see
+    // addMediaProviderObserver) needs a virtual destructor so deleting
+    // through MediaProviderObserver* is well-defined.
+    virtual ~MediaProviderObserver() {}
+    virtual void onVideoModeChanged( bool enable ) {}
+    virtual void onKeyFrameRequested( int callId ) {}
+    virtual void onMediaLost( int callId ) {}
+    virtual void onMediaRestored( int callId ) {}
+  };
+
+} // namespace
+
+#endif // __cplusplus
+
+#endif /* CSFMEDIAPROVIDER_H_ */
diff --git a/media/webrtc/signaling/src/media/CSFMediaTermination.h b/media/webrtc/signaling/src/media/CSFMediaTermination.h
new file mode 100644
index 000000000..1636c6c50
--- /dev/null
+++ b/media/webrtc/signaling/src/media/CSFMediaTermination.h
@@ -0,0 +1,55 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef CSFMEDIATERMINATION_H_
+#define CSFMEDIATERMINATION_H_
+
+// definitions shared by audio and video
+
+typedef enum
+{
+ CodecRequestType_DECODEONLY,
+ CodecRequestType_ENCODEONLY,
+ CodecRequestType_FULLDUPLEX,
+ CodecRequestType_IGNORE,
+
+} CodecRequestType;
+
+typedef enum
+{
+ EncryptionAlgorithm_NONE,
+ EncryptionAlgorithm_AES_128_COUNTER
+
+} EncryptionAlgorithm;
+
+#if __cplusplus
+
+namespace CSF
+{
+  // Abstract interface shared by audio and video terminations: codec query,
+  // RX port allocation/open/start/close, TX start/close, and network config.
+  // NOTE(review): no virtual destructor is declared; confirm implementations
+  // are never deleted through a MediaTermination*.
+  class MediaTermination
+  {
+  public:
+    virtual int getCodecList( CodecRequestType requestType ) = 0;
+
+    virtual int rxAlloc ( int groupId, int streamId, int requestedPort ) = 0;
+    virtual int rxOpen ( int groupId, int streamId, int requestedPort, int listenIp, bool isMulticast ) = 0;
+    virtual int rxStart ( int groupId, int streamId, int payloadType, int packPeriod, int localPort, int rfc2833PayloadType,
+                          EncryptionAlgorithm algorithm, unsigned char* key, int keyLen, unsigned char* salt, int saltLen, int mode, int party ) = 0;
+    virtual void rxClose ( int groupId, int streamId ) = 0;
+    virtual void rxRelease ( int groupId, int streamId, int port ) = 0;
+
+    virtual int txStart ( int groupId, int streamId, int payloadType, int packPeriod, bool vad, short tos,
+                          char* remoteIpAddr, int remotePort, int rfc2833PayloadType, EncryptionAlgorithm algorithm,
+                          unsigned char* key, int keyLen, unsigned char* salt, int saltLen, int mode, int party ) = 0;
+    virtual void txClose ( int groupId, int streamId ) = 0;
+
+    virtual void setLocalIP ( const char* addr ) = 0;
+    virtual void setMediaPorts ( int startPort, int endPort ) = 0;
+    virtual void setDSCPValue ( int value ) = 0;
+  };
+} // namespace
+
+#endif // __cplusplus
+
+#endif /* CSFMEDIATERMINATION_H_ */
diff --git a/media/webrtc/signaling/src/media/CSFToneDefinitions.h b/media/webrtc/signaling/src/media/CSFToneDefinitions.h
new file mode 100644
index 000000000..2fe6f879d
--- /dev/null
+++ b/media/webrtc/signaling/src/media/CSFToneDefinitions.h
@@ -0,0 +1,137 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Tone Coefficients and YN_2 values */
+/* Based on 8kHz sampling rate */
+#define MILLISECONDS_TO_SAMPLES(PERIOD) (short)((PERIOD) << 3)
+
+#define TGN_COEF_300 (short)31863
+#define TGN_COEF_350 (short)31538
+#define TGN_COEF_425 (short)30959
+#define TGN_COEF_440 (short)30831
+#define TGN_COEF_450 (short)30743
+#define TGN_COEF_480 (short)30467
+#define TGN_COEF_500 (short)30274
+#define TGN_COEF_548 (short)29780
+#define TGN_COEF_600 (short)29197
+#define TGN_COEF_620 (short)28959
+#define TGN_COEF_697 (short)27980
+#define TGN_COEF_770 (short)26956
+#define TGN_COEF_852 (short)25701
+#define TGN_COEF_941 (short)24219
+#define TGN_COEF_1209 (short)19073
+#define TGN_COEF_1336 (short)16325
+#define TGN_COEF_1400 (short)14876
+#define TGN_COEF_1477 (short)13085
+#define TGN_COEF_1633 (short)9315
+#define TGN_COEF_1000 (short)23170
+#define TGN_COEF_1MW (short)23098
+#define TGN_COEF_1MW_neg15dBm (short)23098
+
+#define TGN_YN_2_300 (short)-840
+#define TGN_YN_2_350 (short)-814
+#define TGN_YN_2_425 (short)-1966
+#define TGN_YN_2_440 (short)-2032
+#define TGN_YN_2_450 (short)-1384
+#define TGN_YN_2_480 (short)-1104
+#define TGN_YN_2_500 (short)-1148
+#define TGN_YN_2_548 (short)-1252
+#define TGN_YN_2_600 (short)-2270
+#define TGN_YN_2_620 (short)-1404
+#define TGN_YN_2_697 (short)-1561
+#define TGN_YN_2_770 (short)-1706
+#define TGN_YN_2_852 (short)-1861
+#define TGN_YN_2_941 (short)-2021
+#define TGN_YN_2_1209 (short)-2439
+#define TGN_YN_2_1336 (short)-2601
+#define TGN_YN_2_1400 (short)-5346 //tone level=-11.61 dBm0, same as CallWaiting CSCsd65600
+#define TGN_YN_2_1477 (short)-2750
+#define TGN_YN_2_1633 (short)-2875
+#define TGN_YN_2_1000 (short)-1414
+#define TGN_YN_2_1MW (short)-16192
+#define TGN_YN_2_1MW_neg15dBm (short)-2879
+
+// for MLPP tones
+#define TGN_COEF_440_PREC_RB (short)30831
+#define TGN_COEF_480_PREC_RB (short)30467
+#define TGN_COEF_440_PREEMP (short)30831
+#define TGN_COEF_620_PREEMP (short)28959
+#define TGN_COEF_440_PREC_CW (short)30831
+
+#define TGN_YN_2_440_PREC_RB (short)-1016
+#define TGN_YN_2_480_PREC_RB (short)-1104
+#define TGN_YN_2_440_PREEMP (short)-1016
+#define TGN_YN_2_620_PREEMP (short)-1404
+#define TGN_YN_2_440_PREC_CW (short)-1016
+
+
+/* Based on 16kHz sampling rate */
+/*
+#define MILLISECONDS_TO_SAMPLES(PERIOD) ((PERIOD) << 4)
+*/
+
+/* Tone Coefficients and YN_2 values */
+/* Based on 16kHz sampling rate */
+/*
+#define TGN_COEF_350 (short)32459
+#define TGN_COEF_440 (short)32280
+#define TGN_COEF_450 (short)32258
+#define TGN_COEF_480 (short)32188
+#define TGN_COEF_500 (short)32138
+#define TGN_COEF_548 (short)32012
+#define TGN_COEF_600 (short)31863
+#define TGN_COEF_620 (short)31802
+#define TGN_COEF_697 (short)31548
+#define TGN_COEF_770 (short)31281
+#define TGN_COEF_852 (short)30951
+#define TGN_COEF_941 (short)30556
+#define TGN_COEF_1209 (short)29144
+#define TGN_COEF_1336 (short)28361
+#define TGN_COEF_1477 (short)27409
+#define TGN_COEF_1633 (short)26258
+#define TGN_COEF_1000 (short)30274
+#define TGN_COEF_1MW (short)30254
+#define TGN_COEF_1MW_neg15dBm (short)30254
+
+#define TGN_YN_2_350 (short)-410
+#define TGN_YN_2_440 (short)-1031
+#define TGN_YN_2_450 (short)-702
+#define TGN_YN_2_480 (short)-561
+#define TGN_YN_2_500 (short)-584
+#define TGN_YN_2_548 (short)-640
+#define TGN_YN_2_600 (short)-1166
+#define TGN_YN_2_620 (short)-722
+#define TGN_YN_2_697 (short)-810
+#define TGN_YN_2_770 (short)-892
+#define TGN_YN_2_852 (short)-984
+#define TGN_YN_2_941 (short)-1083
+#define TGN_YN_2_1209 (short)-1370
+#define TGN_YN_2_1336 (short)-1502
+#define TGN_YN_2_1477 (short)-1643
+#define TGN_YN_2_1633 (short)-1794
+#define TGN_YN_2_1000 (short)-764
+#define TGN_YN_2_1MW (short)-8768
+#define TGN_YN_2_1MW_neg15dBm (short)-1558
+
+// for MLPP tones
+#define TGN_COEF_440_PREC_RB (short)32280
+#define TGN_COEF_480_PREC_RB (short)32188
+#define TGN_COEF_440_PREEMP (short)32280
+#define TGN_COEF_620_PREEMP (short)31802
+#define TGN_COEF_440_PREC_CW (short)32280
+
+#define TGN_YN_2_440_PREC_RB (short)-515
+#define TGN_YN_2_480_PREC_RB (short)-561
+#define TGN_YN_2_440_PREEMP (short)-515
+#define TGN_YN_2_620_PREEMP (short)-722
+#define TGN_YN_2_440_PREC_CW (short)-515
+*/
+
+#define BEEP_REC_ON MILLISECONDS_TO_SAMPLES(500)
+#define BEEP_REC_OFF MILLISECONDS_TO_SAMPLES((15000 - 500) / 2)
+#define BEEP_MON_ON1 MILLISECONDS_TO_SAMPLES(1500)
+#define BEEP_MON_OFF1 MILLISECONDS_TO_SAMPLES(8000)
+#define BEEP_MON_ON2 MILLISECONDS_TO_SAMPLES(500)
+#define BEEP_MON_OFF2 MILLISECONDS_TO_SAMPLES(8000)
+
diff --git a/media/webrtc/signaling/src/media/CSFVideoCallMediaControl.h b/media/webrtc/signaling/src/media/CSFVideoCallMediaControl.h
new file mode 100644
index 000000000..67c35ed7b
--- /dev/null
+++ b/media/webrtc/signaling/src/media/CSFVideoCallMediaControl.h
@@ -0,0 +1,28 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef CSFVIDEOCALLMEDIACONTROL_H_
+#define CSFVIDEOCALLMEDIACONTROL_H_
+
+#include <csf/CSFVideoMediaControl.h>
+
+#if __cplusplus
+
+namespace CSF
+{
+  // Per-call video control: enable mode plus the remote rendering window.
+  // NOTE(review): no virtual destructor is declared; confirm implementations
+  // are never deleted through a VideoCallMediaControl*.
+  class VideoCallMediaControl
+  {
+  public:
+    virtual void setVideoMode( VideoEnableMode mode ) = 0;
+
+    // window type is platform-specific
+    virtual void setRemoteWindow( VideoWindowHandle window ) = 0;
+    virtual void showRemoteWindow( bool show ) = 0;
+  };
+
+} // namespace
+
+#endif // __cplusplus
+
+#endif /* CSFVIDEOCALLMEDIACONTROL_H_ */
diff --git a/media/webrtc/signaling/src/media/CSFVideoControlWrapper.h b/media/webrtc/signaling/src/media/CSFVideoControlWrapper.h
new file mode 100644
index 000000000..226a7efae
--- /dev/null
+++ b/media/webrtc/signaling/src/media/CSFVideoControlWrapper.h
@@ -0,0 +1,48 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef CSFVIDEOCONTROLWRAPPER_H_
+#define CSFVIDEOCONTROLWRAPPER_H_
+
+#include "CC_Common.h"
+#include "CSFVideoControl.h"
+
+#if __cplusplus
+
+#include <string>
+#include <vector>
+
+namespace CSF
+{
+  DECLARE_NS_PTR(VideoControlWrapper)
+  typedef void *VideoWindowHandle;
+
+  // Forwarding proxy around a VideoControl, analogous to AudioControlWrapper.
+  // NOTE(review): unlike AudioControlWrapper (RefPtr member), this stores a
+  // raw non-owning pointer -- confirm the wrapped control outlives the
+  // wrapper or that setVideoControl(nullptr) is called on expiry.
+  class ECC_API VideoControlWrapper : public VideoControl
+  {
+  public:
+    explicit VideoControlWrapper(VideoControl * videoControl){_realVideoControl = videoControl;};
+
+    virtual void setVideoMode( bool enable );
+
+    // window type is platform-specific
+    virtual void setPreviewWindow( VideoWindowHandle window, int top, int left, int bottom, int right, RenderScaling style );
+    virtual void showPreviewWindow( bool show );
+
+    // device names are in UTF-8 encoding
+    virtual std::vector<std::string> getCaptureDevices();
+
+    virtual std::string getCaptureDevice();
+    virtual bool setCaptureDevice( const std::string& name );
+
+    // Re-point the proxy at a (new) real control; may be null to expire it.
+    virtual void setVideoControl( VideoControl * videoControl ){_realVideoControl = videoControl;};
+
+  private:
+    VideoControl * _realVideoControl;
+  };
+
+} // namespace
+
+#endif // __cplusplus
+
+#endif /* CSFVIDEOCONTROLWRAPPER_H_ */
diff --git a/media/webrtc/signaling/src/media/CSFVideoTermination.h b/media/webrtc/signaling/src/media/CSFVideoTermination.h
new file mode 100644
index 000000000..62fcf8569
--- /dev/null
+++ b/media/webrtc/signaling/src/media/CSFVideoTermination.h
@@ -0,0 +1,36 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef CSFVIDEOMEDIATERMINATION_H_
+#define CSFVIDEOMEDIATERMINATION_H_
+
+#include <CSFMediaTermination.h>
+#include <CSFVideoControl.h>
+
+typedef enum
+{
+ VideoCodecMask_H264 = 1,
+ VideoCodecMask_H263 = 2
+
+} VideoCodecMask;
+
+#if __cplusplus
+
+namespace CSF
+{
+ class VideoTermination : public MediaTermination
+ {
+ public:
+ virtual void setRemoteWindow( int streamId, VideoWindowHandle window) = 0;
+ virtual int setExternalRenderer( int streamId, VideoFormat videoFormat, ExternalRendererHandle render) = 0;
+ virtual void sendIFrame ( int streamId ) = 0;
+ virtual bool mute ( int streamId, bool mute ) = 0;
+ virtual void setAudioStreamId( int streamId) = 0;
+ };
+
+} // namespace
+
+#endif // __cplusplus
+
+#endif /* CSFVIDEOMEDIATERMINATION_H_ */
diff --git a/media/webrtc/signaling/src/media/cip_mmgr_mediadefinitions.h b/media/webrtc/signaling/src/media/cip_mmgr_mediadefinitions.h
new file mode 100644
index 000000000..7d1706ce2
--- /dev/null
+++ b/media/webrtc/signaling/src/media/cip_mmgr_mediadefinitions.h
@@ -0,0 +1,125 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _Included_cip_mmgr_MediaDefinitions
+#define _Included_cip_mmgr_MediaDefinitions
+#ifdef __cplusplus
+extern "C" {
+#endif
+#undef cip_mmgr_MediaDefinitions_MEDIA_TYPE_NONSTANDARD
+#define cip_mmgr_MediaDefinitions_MEDIA_TYPE_NONSTANDARD 1L
+#undef cip_mmgr_MediaDefinitions_MEDIA_TYPE_G711ALAW64K
+#define cip_mmgr_MediaDefinitions_MEDIA_TYPE_G711ALAW64K 2L
+#undef cip_mmgr_MediaDefinitions_MEDIA_TYPE_G711ALAW56K
+#define cip_mmgr_MediaDefinitions_MEDIA_TYPE_G711ALAW56K 3L
+#undef cip_mmgr_MediaDefinitions_MEDIA_TYPE_G711ULAW64K
+#define cip_mmgr_MediaDefinitions_MEDIA_TYPE_G711ULAW64K 4L
+#undef cip_mmgr_MediaDefinitions_MEDIA_TYPE_G711ULAW56K
+#define cip_mmgr_MediaDefinitions_MEDIA_TYPE_G711ULAW56K 5L
+#undef cip_mmgr_MediaDefinitions_MEDIA_TYPE_G722_64K
+#define cip_mmgr_MediaDefinitions_MEDIA_TYPE_G722_64K 6L
+#undef cip_mmgr_MediaDefinitions_MEDIA_TYPE_G722_56K
+#define cip_mmgr_MediaDefinitions_MEDIA_TYPE_G722_56K 7L
+#undef cip_mmgr_MediaDefinitions_MEDIA_TYPE_G722_48K
+#define cip_mmgr_MediaDefinitions_MEDIA_TYPE_G722_48K 8L
+#undef cip_mmgr_MediaDefinitions_MEDIA_TYPE_G7231_5P3K
+#define cip_mmgr_MediaDefinitions_MEDIA_TYPE_G7231_5P3K 9L
+#undef cip_mmgr_MediaDefinitions_MEDIA_TYPE_G7231_6P3K
+#define cip_mmgr_MediaDefinitions_MEDIA_TYPE_G7231_6P3K 10L
+#undef cip_mmgr_MediaDefinitions_MEDIA_TYPE_G728
+#define cip_mmgr_MediaDefinitions_MEDIA_TYPE_G728 11L
+#undef cip_mmgr_MediaDefinitions_MEDIA_TYPE_G729
+#define cip_mmgr_MediaDefinitions_MEDIA_TYPE_G729 12L
+#undef cip_mmgr_MediaDefinitions_MEDIA_TYPE_G729ANNEXA
+#define cip_mmgr_MediaDefinitions_MEDIA_TYPE_G729ANNEXA 13L
+#undef cip_mmgr_MediaDefinitions_MEDIA_TYPE_IS11172AUDIOCAP
+#define cip_mmgr_MediaDefinitions_MEDIA_TYPE_IS11172AUDIOCAP 14L
+#undef cip_mmgr_MediaDefinitions_MEDIA_TYPE_IS13818AUDIOCAP
+#define cip_mmgr_MediaDefinitions_MEDIA_TYPE_IS13818AUDIOCAP 15L
+#undef cip_mmgr_MediaDefinitions_MEDIA_TYPE_G729ANNEXB
+#define cip_mmgr_MediaDefinitions_MEDIA_TYPE_G729ANNEXB 16L
+#undef cip_mmgr_MediaDefinitions_MEDIA_TYPE_G729ANNEXAWANNEXB
+#define cip_mmgr_MediaDefinitions_MEDIA_TYPE_G729ANNEXAWANNEXB 17L
+#undef cip_mmgr_MediaDefinitions_MEDIA_TYPE_GSM_FULL_RATE
+#define cip_mmgr_MediaDefinitions_MEDIA_TYPE_GSM_FULL_RATE 18L
+#undef cip_mmgr_MediaDefinitions_MEDIA_TYPE_GSM_HALF_RATE
+#define cip_mmgr_MediaDefinitions_MEDIA_TYPE_GSM_HALF_RATE 19L
+#undef cip_mmgr_MediaDefinitions_MEDIA_TYPE_GSM_ENHANCED_FULL_RATE
+#define cip_mmgr_MediaDefinitions_MEDIA_TYPE_GSM_ENHANCED_FULL_RATE 20L
+#undef cip_mmgr_MediaDefinitions_MEDIA_TYPE_WIDE_BAND_256K
+#define cip_mmgr_MediaDefinitions_MEDIA_TYPE_WIDE_BAND_256K 21L
+#undef cip_mmgr_MediaDefinitions_MEDIA_TYPE_WIDE_BAND_128K
+#define cip_mmgr_MediaDefinitions_MEDIA_TYPE_WIDE_BAND_128K 22L
+#undef cip_mmgr_MediaDefinitions_MEDIA_TYPE_DATA64
+#define cip_mmgr_MediaDefinitions_MEDIA_TYPE_DATA64 23L
+#undef cip_mmgr_MediaDefinitions_MEDIA_TYPE_DATA56
+#define cip_mmgr_MediaDefinitions_MEDIA_TYPE_DATA56 24L
+#undef cip_mmgr_MediaDefinitions_MEDIA_TYPE_GSM
+#define cip_mmgr_MediaDefinitions_MEDIA_TYPE_GSM 25L
+#undef cip_mmgr_MediaDefinitions_MEDIA_TYPE_ACTIVEVOICE
+#define cip_mmgr_MediaDefinitions_MEDIA_TYPE_ACTIVEVOICE 26L
+#undef cip_mmgr_MediaDefinitions_MEDIA_TYPE_G726_32K
+#define cip_mmgr_MediaDefinitions_MEDIA_TYPE_G726_32K 27L
+#undef cip_mmgr_MediaDefinitions_MEDIA_TYPE_G726_24K
+#define cip_mmgr_MediaDefinitions_MEDIA_TYPE_G726_24K 28L
+#undef cip_mmgr_MediaDefinitions_MEDIA_TYPE_G726_16K
+#define cip_mmgr_MediaDefinitions_MEDIA_TYPE_G726_16K 29L
+#undef cip_mmgr_MediaDefinitions_MEDIA_TYPE_H261
+#define cip_mmgr_MediaDefinitions_MEDIA_TYPE_H261 30L
+#undef cip_mmgr_MediaDefinitions_MEDIA_TYPE_H263
+#define cip_mmgr_MediaDefinitions_MEDIA_TYPE_H263 31L
+#undef cip_mmgr_MediaDefinitions_MEDIA_TYPE_T120
+#define cip_mmgr_MediaDefinitions_MEDIA_TYPE_T120 32L
+#undef cip_mmgr_MediaDefinitions_MEDIA_TYPE_H224
+#define cip_mmgr_MediaDefinitions_MEDIA_TYPE_H224 33L
+#undef cip_mmgr_MediaDefinitions_MEDIA_TYPE_PCM_44_1K_16BIT_STEREO
+#define cip_mmgr_MediaDefinitions_MEDIA_TYPE_PCM_44_1K_16BIT_STEREO 34L
+#undef cip_mmgr_MediaDefinitions_MEDIA_TYPE_PCM_44_1K_16BIT_MONO
+#define cip_mmgr_MediaDefinitions_MEDIA_TYPE_PCM_44_1K_16BIT_MONO 35L
+#undef cip_mmgr_MediaDefinitions_MEDIA_TYPE_ZORAN_VIDEO
+#define cip_mmgr_MediaDefinitions_MEDIA_TYPE_ZORAN_VIDEO 36L
+#undef cip_mmgr_MediaDefinitions_MEDIA_TYPE_LOGITECH_VIDEO
+#define cip_mmgr_MediaDefinitions_MEDIA_TYPE_LOGITECH_VIDEO 37L
+#undef cip_mmgr_MediaDefinitions_MEDIA_TYPE_RFC2833
+#define cip_mmgr_MediaDefinitions_MEDIA_TYPE_RFC2833 38L
+#undef cip_mmgr_MediaDefinitions_MEDIA_TYPE_ILBC20
+#define cip_mmgr_MediaDefinitions_MEDIA_TYPE_ILBC20 39L
+#undef cip_mmgr_MediaDefinitions_MEDIA_TYPE_ILBC30
+#define cip_mmgr_MediaDefinitions_MEDIA_TYPE_ILBC30 40L
+#undef cip_mmgr_MediaDefinitions_MEDIA_TYPE_ISAC
+#define cip_mmgr_MediaDefinitions_MEDIA_TYPE_ISAC 41L
+#undef cip_mmgr_MediaDefinitions_MEDIA_TYPE_VP8
+#define cip_mmgr_MediaDefinitions_MEDIA_TYPE_VP8 42L
+#undef cip_mmgr_MediaDefinitions_MEDIA_TYPE_OPUS
+#define cip_mmgr_MediaDefinitions_MEDIA_TYPE_OPUS 109L
+
+
+#undef cip_mmgr_MediaDefinitions_BANDWIDTH_NARROWBAND
+#define cip_mmgr_MediaDefinitions_BANDWIDTH_NARROWBAND 200L
+#undef cip_mmgr_MediaDefinitions_BANDWIDTH_WIDEBAND
+#define cip_mmgr_MediaDefinitions_BANDWIDTH_WIDEBAND 201L
+
+#undef cip_mmgr_MediaDefinitions_MEDIA_RESOURCE_G711_BIT_POSITION
+#define cip_mmgr_MediaDefinitions_MEDIA_RESOURCE_G711_BIT_POSITION 1L
+#undef cip_mmgr_MediaDefinitions_MEDIA_RESOURCE_G729A_BIT_POSITION
+#define cip_mmgr_MediaDefinitions_MEDIA_RESOURCE_G729A_BIT_POSITION 2L
+#undef cip_mmgr_MediaDefinitions_MEDIA_RESOURCE_G729B_BIT_POSITION
+#define cip_mmgr_MediaDefinitions_MEDIA_RESOURCE_G729B_BIT_POSITION 4L
+#undef cip_mmgr_MediaDefinitions_MEDIA_RESOURCE_LINEAR_BIT_POSITION
+#define cip_mmgr_MediaDefinitions_MEDIA_RESOURCE_LINEAR_BIT_POSITION 8L
+#undef cip_mmgr_MediaDefinitions_MEDIA_RESOURCE_G722_BIT_POSITION
+#define cip_mmgr_MediaDefinitions_MEDIA_RESOURCE_G722_BIT_POSITION 16L
+#undef cip_mmgr_MediaDefinitions_MEDIA_RESOURCE_ILBC_BIT_POSITION
+#define cip_mmgr_MediaDefinitions_MEDIA_RESOURCE_ILBC_BIT_POSITION 32L
+
+#undef cip_mmgr_MediaDefinitions_FULLDUPLEX
+#define cip_mmgr_MediaDefinitions_FULLDUPLEX 0L
+#undef cip_mmgr_MediaDefinitions_HALFDUPLEX_DECODE
+#define cip_mmgr_MediaDefinitions_HALFDUPLEX_DECODE 1L
+#undef cip_mmgr_MediaDefinitions_HALFDUPLEX_ENCODE
+#define cip_mmgr_MediaDefinitions_HALFDUPLEX_ENCODE 2L
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp b/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
new file mode 100644
index 000000000..586876406
--- /dev/null
+++ b/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
@@ -0,0 +1,2377 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Original author: ekr@rtfm.com
+
+#include "MediaPipeline.h"
+
+#ifndef USE_FAKE_MEDIA_STREAMS
+#include "MediaStreamGraphImpl.h"
+#endif
+
+#include <math.h>
+
+#include "nspr.h"
+#include "srtp.h"
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+#include "VideoSegment.h"
+#include "Layers.h"
+#include "LayersLogging.h"
+#include "ImageTypes.h"
+#include "ImageContainer.h"
+#include "DOMMediaStream.h"
+#include "MediaStreamTrack.h"
+#include "MediaStreamListener.h"
+#include "MediaStreamVideoSink.h"
+#include "VideoUtils.h"
+#include "VideoStreamTrack.h"
+#ifdef WEBRTC_GONK
+#include "GrallocImages.h"
+#include "mozilla/layers/GrallocTextureClient.h"
+#endif
+#endif
+
+#include "nsError.h"
+#include "AudioSegment.h"
+#include "MediaSegment.h"
+#include "MediaPipelineFilter.h"
+#include "databuffer.h"
+#include "transportflow.h"
+#include "transportlayer.h"
+#include "transportlayerdtls.h"
+#include "transportlayerice.h"
+#include "runnable_utils.h"
+#include "libyuv/convert.h"
+#include "mozilla/SharedThreadPool.h"
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+#include "mozilla/PeerIdentity.h"
+#include "mozilla/TaskQueue.h"
+#endif
+#include "mozilla/gfx/Point.h"
+#include "mozilla/gfx/Types.h"
+#include "mozilla/UniquePtr.h"
+#include "mozilla/UniquePtrExtensions.h"
+#include "mozilla/Sprintf.h"
+
+#include "webrtc/common_types.h"
+#include "webrtc/common_video/interface/native_handle.h"
+#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
+#include "webrtc/video_engine/include/vie_errors.h"
+
+#include "logging.h"
+
+// Max size given stereo is 480*2*2 = 1920 (10ms of 16-bits stereo audio at
+// 48KHz)
+#define AUDIO_SAMPLE_BUFFER_MAX 480*2*2
+static_assert((WEBRTC_DEFAULT_SAMPLE_RATE/100)*sizeof(uint16_t) * 2
+ <= AUDIO_SAMPLE_BUFFER_MAX,
+ "AUDIO_SAMPLE_BUFFER_MAX is not large enough");
+
+using namespace mozilla;
+using namespace mozilla::dom;
+using namespace mozilla::gfx;
+using namespace mozilla::layers;
+
+// Logging context
+MOZ_MTLOG_MODULE("mediapipeline")
+
+namespace mozilla {
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+class VideoConverterListener
+{
+public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(VideoConverterListener)
+
+ virtual void OnVideoFrameConverted(unsigned char* aVideoFrame,
+ unsigned int aVideoFrameLength,
+ unsigned short aWidth,
+ unsigned short aHeight,
+ VideoType aVideoType,
+ uint64_t aCaptureTime) = 0;
+
+ virtual void OnVideoFrameConverted(webrtc::I420VideoFrame& aVideoFrame) = 0;
+
+protected:
+ virtual ~VideoConverterListener() {}
+};
+
+// I420 buffer size macros
+#define YSIZE(x,y) ((x)*(y))
+#define CRSIZE(x,y) ((((x)+1) >> 1) * (((y)+1) >> 1))
+#define I420SIZE(x,y) (YSIZE((x),(y)) + 2 * CRSIZE((x),(y)))
+
+// An async video frame format converter.
+//
+// Input is typically a MediaStream(Track)Listener driven by MediaStreamGraph.
+//
+// We keep track of the size of the TaskQueue so we can drop frames if
+// conversion is taking too long.
+//
+// Output is passed through to all added VideoConverterListeners on a TaskQueue
+// thread whenever a frame is converted.
+class VideoFrameConverter
+{
+public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(VideoFrameConverter)
+
+ VideoFrameConverter()
+ : mLength(0)
+ , last_img_(-1) // -1 is not a guaranteed invalid serial. See bug 1262134.
+ , disabled_frame_sent_(false)
+#ifdef DEBUG
+ , mThrottleCount(0)
+ , mThrottleRecord(0)
+#endif
+ , mMutex("VideoFrameConverter")
+ {
+ MOZ_COUNT_CTOR(VideoFrameConverter);
+
+ RefPtr<SharedThreadPool> pool =
+ SharedThreadPool::Get(NS_LITERAL_CSTRING("VideoFrameConverter"));
+
+ mTaskQueue = MakeAndAddRef<TaskQueue>(pool.forget());
+ }
+
+ void QueueVideoChunk(VideoChunk& aChunk, bool aForceBlack)
+ {
+ if (aChunk.IsNull()) {
+ return;
+ }
+
+ // We get passed duplicate frames every ~10ms even with no frame change.
+ int32_t serial = aChunk.mFrame.GetImage()->GetSerial();
+ if (serial == last_img_) {
+ return;
+ }
+ last_img_ = serial;
+
+ // A throttling limit of 1 allows us to convert 2 frames concurrently.
+ // It's short enough to not build up too significant a delay, while
+ // giving us a margin to not cause some machines to drop every other frame.
+ const int32_t queueThrottlingLimit = 1;
+ if (mLength > queueThrottlingLimit) {
+ MOZ_MTLOG(ML_DEBUG, "VideoFrameConverter " << this << " queue is full." <<
+ " Throttling by throwing away a frame.");
+#ifdef DEBUG
+ ++mThrottleCount;
+ mThrottleRecord = std::max(mThrottleCount, mThrottleRecord);
+#endif
+ return;
+ }
+
+#ifdef DEBUG
+ if (mThrottleCount > 0) {
+ auto level = ML_DEBUG;
+ if (mThrottleCount > 5) {
+ // Log at a higher level when we have large drops.
+ level = ML_INFO;
+ }
+ MOZ_MTLOG(level, "VideoFrameConverter " << this << " stopped" <<
+ " throttling after throwing away " << mThrottleCount <<
+ " frames. Longest throttle so far was " <<
+ mThrottleRecord << " frames.");
+ mThrottleCount = 0;
+ }
+#endif
+
+ bool forceBlack = aForceBlack || aChunk.mFrame.GetForceBlack();
+
+ if (forceBlack) {
+ // Reset the last-img check.
+ // -1 is not a guaranteed invalid serial. See bug 1262134.
+ last_img_ = -1;
+
+ if (disabled_frame_sent_) {
+ // After disabling we just pass one black frame to the encoder.
+ // Allocating and setting it to black steals some performance
+ // that can be avoided. We don't handle resolution changes while
+ // disabled for now.
+ return;
+ }
+
+ disabled_frame_sent_ = true;
+ } else {
+ disabled_frame_sent_ = false;
+ }
+
+ ++mLength; // Atomic
+
+ nsCOMPtr<nsIRunnable> runnable =
+ NewRunnableMethod<StorensRefPtrPassByPtr<Image>, bool>(
+ this, &VideoFrameConverter::ProcessVideoFrame,
+ aChunk.mFrame.GetImage(), forceBlack);
+ mTaskQueue->Dispatch(runnable.forget());
+ }
+
+ void AddListener(VideoConverterListener* aListener)
+ {
+ MutexAutoLock lock(mMutex);
+
+ MOZ_ASSERT(!mListeners.Contains(aListener));
+ mListeners.AppendElement(aListener);
+ }
+
+ bool RemoveListener(VideoConverterListener* aListener)
+ {
+ MutexAutoLock lock(mMutex);
+
+ return mListeners.RemoveElement(aListener);
+ }
+
+ void Shutdown()
+ {
+ mTaskQueue->BeginShutdown();
+ mTaskQueue->AwaitShutdownAndIdle();
+ }
+
+protected:
+ virtual ~VideoFrameConverter()
+ {
+ MOZ_COUNT_DTOR(VideoFrameConverter);
+ }
+
+ void VideoFrameConverted(unsigned char* aVideoFrame,
+ unsigned int aVideoFrameLength,
+ unsigned short aWidth,
+ unsigned short aHeight,
+ VideoType aVideoType,
+ uint64_t aCaptureTime)
+ {
+ MutexAutoLock lock(mMutex);
+
+ for (RefPtr<VideoConverterListener>& listener : mListeners) {
+ listener->OnVideoFrameConverted(aVideoFrame, aVideoFrameLength,
+ aWidth, aHeight, aVideoType, aCaptureTime);
+ }
+ }
+
+ void VideoFrameConverted(webrtc::I420VideoFrame& aVideoFrame)
+ {
+ MutexAutoLock lock(mMutex);
+
+ for (RefPtr<VideoConverterListener>& listener : mListeners) {
+ listener->OnVideoFrameConverted(aVideoFrame);
+ }
+ }
+
+ void ProcessVideoFrame(Image* aImage, bool aForceBlack)
+ {
+ --mLength; // Atomic
+ MOZ_ASSERT(mLength >= 0);
+
+ if (aForceBlack) {
+ IntSize size = aImage->GetSize();
+ uint32_t yPlaneLen = YSIZE(size.width, size.height);
+ uint32_t cbcrPlaneLen = 2 * CRSIZE(size.width, size.height);
+ uint32_t length = yPlaneLen + cbcrPlaneLen;
+
+ // Send a black image.
+ auto pixelData = MakeUniqueFallible<uint8_t[]>(length);
+ if (pixelData) {
+ // YCrCb black = 0x10 0x80 0x80
+ memset(pixelData.get(), 0x10, yPlaneLen);
+ // Fill Cb/Cr planes
+ memset(pixelData.get() + yPlaneLen, 0x80, cbcrPlaneLen);
+
+ MOZ_MTLOG(ML_DEBUG, "Sending a black video frame");
+ VideoFrameConverted(pixelData.get(), length, size.width, size.height,
+ mozilla::kVideoI420, 0);
+ }
+ return;
+ }
+
+ ImageFormat format = aImage->GetFormat();
+#ifdef WEBRTC_GONK
+ GrallocImage* nativeImage = aImage->AsGrallocImage();
+ if (nativeImage) {
+ android::sp<android::GraphicBuffer> graphicBuffer = nativeImage->GetGraphicBuffer();
+ int pixelFormat = graphicBuffer->getPixelFormat(); /* PixelFormat is an enum == int */
+ mozilla::VideoType destFormat;
+ switch (pixelFormat) {
+ case HAL_PIXEL_FORMAT_YV12:
+ // all android must support this
+ destFormat = mozilla::kVideoYV12;
+ break;
+ case GrallocImage::HAL_PIXEL_FORMAT_YCbCr_420_SP:
+ destFormat = mozilla::kVideoNV21;
+ break;
+ case GrallocImage::HAL_PIXEL_FORMAT_YCbCr_420_P:
+ destFormat = mozilla::kVideoI420;
+ break;
+ default:
+ // XXX Bug NNNNNNN
+ // use http://dxr.mozilla.org/mozilla-central/source/content/media/omx/I420ColorConverterHelper.cpp
+ // to convert unknown types (OEM-specific) to I420
+ MOZ_MTLOG(ML_ERROR, "Un-handled GRALLOC buffer type:" << pixelFormat);
+ MOZ_CRASH();
+ }
+ void *basePtr;
+ graphicBuffer->lock(android::GraphicBuffer::USAGE_SW_READ_MASK, &basePtr);
+ uint32_t width = graphicBuffer->getWidth();
+ uint32_t height = graphicBuffer->getHeight();
+ // XXX gralloc buffer's width and stride could be different depends on implementations.
+
+ if (destFormat != mozilla::kVideoI420) {
+ unsigned char *video_frame = static_cast<unsigned char*>(basePtr);
+ webrtc::I420VideoFrame i420_frame;
+ int stride_y = width;
+ int stride_uv = (width + 1) / 2;
+ int target_width = width;
+ int target_height = height;
+ if (i420_frame.CreateEmptyFrame(target_width,
+ abs(target_height),
+ stride_y,
+ stride_uv, stride_uv) < 0) {
+ MOZ_ASSERT(false, "Can't allocate empty i420frame");
+ return;
+ }
+ webrtc::VideoType commonVideoType =
+ webrtc::RawVideoTypeToCommonVideoVideoType(
+ static_cast<webrtc::RawVideoType>((int)destFormat));
+ if (ConvertToI420(commonVideoType, video_frame, 0, 0, width, height,
+ I420SIZE(width, height), webrtc::kVideoRotation_0,
+ &i420_frame)) {
+ MOZ_ASSERT(false, "Can't convert video type for sending to I420");
+ return;
+ }
+ i420_frame.set_ntp_time_ms(0);
+ VideoFrameConverted(i420_frame);
+ } else {
+ VideoFrameConverted(static_cast<unsigned char*>(basePtr),
+ I420SIZE(width, height),
+ width,
+ height,
+ destFormat, 0);
+ }
+ graphicBuffer->unlock();
+ return;
+ } else
+#endif
+ if (format == ImageFormat::PLANAR_YCBCR) {
+ // Cast away constness b/c some of the accessors are non-const
+ PlanarYCbCrImage* yuv = const_cast<PlanarYCbCrImage *>(
+ static_cast<const PlanarYCbCrImage *>(aImage));
+
+ const PlanarYCbCrData *data = yuv->GetData();
+ if (data) {
+ uint8_t *y = data->mYChannel;
+ uint8_t *cb = data->mCbChannel;
+ uint8_t *cr = data->mCrChannel;
+ int32_t yStride = data->mYStride;
+ int32_t cbCrStride = data->mCbCrStride;
+ uint32_t width = yuv->GetSize().width;
+ uint32_t height = yuv->GetSize().height;
+
+ webrtc::I420VideoFrame i420_frame;
+ int rv = i420_frame.CreateFrame(y, cb, cr, width, height,
+ yStride, cbCrStride, cbCrStride,
+ webrtc::kVideoRotation_0);
+ if (rv != 0) {
+ NS_ERROR("Creating an I420 frame failed");
+ return;
+ }
+
+ MOZ_MTLOG(ML_DEBUG, "Sending an I420 video frame");
+ VideoFrameConverted(i420_frame);
+ return;
+ }
+ }
+
+ RefPtr<SourceSurface> surf = aImage->GetAsSourceSurface();
+ if (!surf) {
+ MOZ_MTLOG(ML_ERROR, "Getting surface from " << Stringify(format) << " image failed");
+ return;
+ }
+
+ RefPtr<DataSourceSurface> data = surf->GetDataSurface();
+ if (!data) {
+ MOZ_MTLOG(ML_ERROR, "Getting data surface from " << Stringify(format)
+ << " image with " << Stringify(surf->GetType()) << "("
+ << Stringify(surf->GetFormat()) << ") surface failed");
+ return;
+ }
+
+ IntSize size = aImage->GetSize();
+ int half_width = (size.width + 1) >> 1;
+ int half_height = (size.height + 1) >> 1;
+ int c_size = half_width * half_height;
+ int buffer_size = YSIZE(size.width, size.height) + 2 * c_size;
+ auto yuv_scoped = MakeUniqueFallible<uint8[]>(buffer_size);
+ if (!yuv_scoped) {
+ return;
+ }
+ uint8* yuv = yuv_scoped.get();
+
+ DataSourceSurface::ScopedMap map(data, DataSourceSurface::READ);
+ if (!map.IsMapped()) {
+ MOZ_MTLOG(ML_ERROR, "Reading DataSourceSurface from " << Stringify(format)
+ << " image with " << Stringify(surf->GetType()) << "("
+ << Stringify(surf->GetFormat()) << ") surface failed");
+ return;
+ }
+
+ int rv;
+ int cb_offset = YSIZE(size.width, size.height);
+ int cr_offset = cb_offset + c_size;
+ switch (surf->GetFormat()) {
+ case SurfaceFormat::B8G8R8A8:
+ case SurfaceFormat::B8G8R8X8:
+ rv = libyuv::ARGBToI420(static_cast<uint8*>(map.GetData()),
+ map.GetStride(),
+ yuv, size.width,
+ yuv + cb_offset, half_width,
+ yuv + cr_offset, half_width,
+ size.width, size.height);
+ break;
+ case SurfaceFormat::R5G6B5_UINT16:
+ rv = libyuv::RGB565ToI420(static_cast<uint8*>(map.GetData()),
+ map.GetStride(),
+ yuv, size.width,
+ yuv + cb_offset, half_width,
+ yuv + cr_offset, half_width,
+ size.width, size.height);
+ break;
+ default:
+ MOZ_MTLOG(ML_ERROR, "Unsupported RGB video format" << Stringify(surf->GetFormat()));
+ MOZ_ASSERT(PR_FALSE);
+ return;
+ }
+ if (rv != 0) {
+ MOZ_MTLOG(ML_ERROR, Stringify(surf->GetFormat()) << " to I420 conversion failed");
+ return;
+ }
+ MOZ_MTLOG(ML_DEBUG, "Sending an I420 video frame converted from " <<
+ Stringify(surf->GetFormat()));
+ VideoFrameConverted(yuv, buffer_size, size.width, size.height, mozilla::kVideoI420, 0);
+ }
+
+ Atomic<int32_t, Relaxed> mLength;
+ RefPtr<TaskQueue> mTaskQueue;
+
+ // Written and read from the queueing thread (normally MSG).
+ int32_t last_img_; // serial number of last Image
+ bool disabled_frame_sent_; // If a black frame has been sent after disabling.
+#ifdef DEBUG
+ uint32_t mThrottleCount;
+ uint32_t mThrottleRecord;
+#endif
+
+ // mMutex guards the below variables.
+ Mutex mMutex;
+ nsTArray<RefPtr<VideoConverterListener>> mListeners;
+};
+#endif
+
+// An async inserter for audio data, to avoid running audio codec encoders
+// on the MSG/input audio thread. Basically just bounces all the audio
+// data to a single audio processing/input queue. We could if we wanted to
+// use multiple threads and a TaskQueue.
+class AudioProxyThread
+{
+public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AudioProxyThread)
+
+ explicit AudioProxyThread(AudioSessionConduit *aConduit)
+ : mConduit(aConduit)
+ {
+ MOZ_ASSERT(mConduit);
+ MOZ_COUNT_CTOR(AudioProxyThread);
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+ // Use only 1 thread; also forces FIFO operation
+ // We could use multiple threads, but that may be dicier with the webrtc.org
+ // code. If so we'd need to use TaskQueues like the videoframe converter
+ RefPtr<SharedThreadPool> pool =
+ SharedThreadPool::Get(NS_LITERAL_CSTRING("AudioProxy"), 1);
+
+ mThread = pool.get();
+#else
+ nsCOMPtr<nsIThread> thread;
+ if (!NS_WARN_IF(NS_FAILED(NS_NewNamedThread("AudioProxy", getter_AddRefs(thread))))) {
+ mThread = thread;
+ }
+#endif
+ }
+
+ // called on mThread
+ void InternalProcessAudioChunk(
+ TrackRate rate,
+ AudioChunk& chunk,
+ bool enabled) {
+
+ // Convert to interleaved, 16-bits integer audio, with a maximum of two
+ // channels (since the WebRTC.org code below makes the assumption that the
+ // input audio is either mono or stereo).
+ uint32_t outputChannels = chunk.ChannelCount() == 1 ? 1 : 2;
+ const int16_t* samples = nullptr;
+ UniquePtr<int16_t[]> convertedSamples;
+
+ // We take advantage of the fact that the common case (microphone directly to
+ // PeerConnection, that is, a normal call), the samples are already 16-bits
+ // mono, so the representation in interleaved and planar is the same, and we
+ // can just use that.
+ if (enabled && outputChannels == 1 && chunk.mBufferFormat == AUDIO_FORMAT_S16) {
+ samples = chunk.ChannelData<int16_t>().Elements()[0];
+ } else {
+ convertedSamples = MakeUnique<int16_t[]>(chunk.mDuration * outputChannels);
+
+ if (!enabled || chunk.mBufferFormat == AUDIO_FORMAT_SILENCE) {
+ PodZero(convertedSamples.get(), chunk.mDuration * outputChannels);
+ } else if (chunk.mBufferFormat == AUDIO_FORMAT_FLOAT32) {
+ DownmixAndInterleave(chunk.ChannelData<float>(),
+ chunk.mDuration, chunk.mVolume, outputChannels,
+ convertedSamples.get());
+ } else if (chunk.mBufferFormat == AUDIO_FORMAT_S16) {
+ DownmixAndInterleave(chunk.ChannelData<int16_t>(),
+ chunk.mDuration, chunk.mVolume, outputChannels,
+ convertedSamples.get());
+ }
+ samples = convertedSamples.get();
+ }
+
+ MOZ_ASSERT(!(rate%100)); // rate should be a multiple of 100
+
+ // Check if the rate or the number of channels has changed since the last time
+ // we came through. I realize it may be overkill to check if the rate has
+ // changed, but I believe it is possible (e.g. if we change sources) and it
+ // costs us very little to handle this case.
+
+ uint32_t audio_10ms = rate / 100;
+
+ if (!packetizer_ ||
+ packetizer_->PacketSize() != audio_10ms ||
+ packetizer_->Channels() != outputChannels) {
+ // It's ok to drop the audio still in the packetizer here.
+ packetizer_ = new AudioPacketizer<int16_t, int16_t>(audio_10ms, outputChannels);
+ }
+
+ packetizer_->Input(samples, chunk.mDuration);
+
+ while (packetizer_->PacketsAvailable()) {
+ uint32_t samplesPerPacket = packetizer_->PacketSize() *
+ packetizer_->Channels();
+ // We know that webrtc.org's code going to copy the samples down the line,
+ // so we can just use a stack buffer here instead of malloc-ing.
+ int16_t packet[AUDIO_SAMPLE_BUFFER_MAX];
+
+ packetizer_->Output(packet);
+ mConduit->SendAudioFrame(packet, samplesPerPacket, rate, 0);
+ }
+ }
+
+ void QueueAudioChunk(TrackRate rate, AudioChunk& chunk, bool enabled)
+ {
+ RUN_ON_THREAD(mThread,
+ WrapRunnable(RefPtr<AudioProxyThread>(this),
+ &AudioProxyThread::InternalProcessAudioChunk,
+ rate, chunk, enabled),
+ NS_DISPATCH_NORMAL);
+ }
+
+protected:
+ virtual ~AudioProxyThread()
+ {
+ // Conduits must be released on MainThread, and we might have the last reference
+ // We don't need to worry about runnables still trying to access the conduit, since
+ // the runnables hold a ref to AudioProxyThread.
+ NS_ReleaseOnMainThread(mConduit.forget());
+ MOZ_COUNT_DTOR(AudioProxyThread);
+ }
+
+ RefPtr<AudioSessionConduit> mConduit;
+ nsCOMPtr<nsIEventTarget> mThread;
+ // Only accessed on mThread
+ nsAutoPtr<AudioPacketizer<int16_t, int16_t>> packetizer_;
+};
+
+static char kDTLSExporterLabel[] = "EXTRACTOR-dtls_srtp";
+
+MediaPipeline::MediaPipeline(const std::string& pc,
+ Direction direction,
+ nsCOMPtr<nsIEventTarget> main_thread,
+ nsCOMPtr<nsIEventTarget> sts_thread,
+ const std::string& track_id,
+ int level,
+ RefPtr<MediaSessionConduit> conduit,
+ RefPtr<TransportFlow> rtp_transport,
+ RefPtr<TransportFlow> rtcp_transport,
+ nsAutoPtr<MediaPipelineFilter> filter)
+ : direction_(direction),
+ track_id_(track_id),
+ level_(level),
+ conduit_(conduit),
+ rtp_(rtp_transport, rtcp_transport ? RTP : MUX),
+ rtcp_(rtcp_transport ? rtcp_transport : rtp_transport,
+ rtcp_transport ? RTCP : MUX),
+ main_thread_(main_thread),
+ sts_thread_(sts_thread),
+ rtp_packets_sent_(0),
+ rtcp_packets_sent_(0),
+ rtp_packets_received_(0),
+ rtcp_packets_received_(0),
+ rtp_bytes_sent_(0),
+ rtp_bytes_received_(0),
+ pc_(pc),
+ description_(),
+ filter_(filter),
+ rtp_parser_(webrtc::RtpHeaderParser::Create()) {
+ // To indicate rtcp-mux rtcp_transport should be nullptr.
+ // Therefore it's an error to send in the same flow for
+ // both rtp and rtcp.
+ MOZ_ASSERT(rtp_transport != rtcp_transport);
+
+ // PipelineTransport() will access this->sts_thread_; moved here for safety
+ transport_ = new PipelineTransport(this);
+}
+
+MediaPipeline::~MediaPipeline() {
+ ASSERT_ON_THREAD(main_thread_);
+ MOZ_MTLOG(ML_INFO, "Destroying MediaPipeline: " << description_);
+}
+
+nsresult MediaPipeline::Init() {
+ ASSERT_ON_THREAD(main_thread_);
+
+ if (direction_ == RECEIVE) {
+ conduit_->SetReceiverTransport(transport_);
+ } else {
+ conduit_->SetTransmitterTransport(transport_);
+ }
+
+ RUN_ON_THREAD(sts_thread_,
+ WrapRunnable(
+ RefPtr<MediaPipeline>(this),
+ &MediaPipeline::Init_s),
+ NS_DISPATCH_NORMAL);
+
+ return NS_OK;
+}
+
+nsresult MediaPipeline::Init_s() {
+ ASSERT_ON_THREAD(sts_thread_);
+
+ return AttachTransport_s();
+}
+
+
+// Disconnect us from the transport so that we can cleanly destruct the
+// pipeline on the main thread. ShutdownMedia_m() must have already been
+// called
+void
+MediaPipeline::DetachTransport_s()
+{
+ ASSERT_ON_THREAD(sts_thread_);
+
+ disconnect_all();
+ transport_->Detach();
+ rtp_.Detach();
+ rtcp_.Detach();
+}
+
+nsresult
+MediaPipeline::AttachTransport_s()
+{
+ ASSERT_ON_THREAD(sts_thread_);
+ nsresult res;
+ MOZ_ASSERT(rtp_.transport_);
+ MOZ_ASSERT(rtcp_.transport_);
+ res = ConnectTransport_s(rtp_);
+ if (NS_FAILED(res)) {
+ return res;
+ }
+
+ if (rtcp_.transport_ != rtp_.transport_) {
+ res = ConnectTransport_s(rtcp_);
+ if (NS_FAILED(res)) {
+ return res;
+ }
+ }
+
+ transport_->Attach(this);
+
+ return NS_OK;
+}
+
+void
+MediaPipeline::UpdateTransport_m(int level,
+ RefPtr<TransportFlow> rtp_transport,
+ RefPtr<TransportFlow> rtcp_transport,
+ nsAutoPtr<MediaPipelineFilter> filter)
+{
+ RUN_ON_THREAD(sts_thread_,
+ WrapRunnable(
+ this,
+ &MediaPipeline::UpdateTransport_s,
+ level,
+ rtp_transport,
+ rtcp_transport,
+ filter),
+ NS_DISPATCH_NORMAL);
+}
+
+void
+MediaPipeline::UpdateTransport_s(int level,
+ RefPtr<TransportFlow> rtp_transport,
+ RefPtr<TransportFlow> rtcp_transport,
+ nsAutoPtr<MediaPipelineFilter> filter)
+{
+ bool rtcp_mux = false;
+ if (!rtcp_transport) {
+ rtcp_transport = rtp_transport;
+ rtcp_mux = true;
+ }
+
+ if ((rtp_transport != rtp_.transport_) ||
+ (rtcp_transport != rtcp_.transport_)) {
+ DetachTransport_s();
+ rtp_ = TransportInfo(rtp_transport, rtcp_mux ? MUX : RTP);
+ rtcp_ = TransportInfo(rtcp_transport, rtcp_mux ? MUX : RTCP);
+ AttachTransport_s();
+ }
+
+ level_ = level;
+
+ if (filter_ && filter) {
+ // Use the new filter, but don't forget any remote SSRCs that we've learned
+ // by receiving traffic.
+ filter_->Update(*filter);
+ } else {
+ filter_ = filter;
+ }
+}
+
+void
+MediaPipeline::SelectSsrc_m(size_t ssrc_index)
+{
+ RUN_ON_THREAD(sts_thread_,
+ WrapRunnable(
+ this,
+ &MediaPipeline::SelectSsrc_s,
+ ssrc_index),
+ NS_DISPATCH_NORMAL);
+}
+
+void
+MediaPipeline::SelectSsrc_s(size_t ssrc_index)
+{
+ filter_ = new MediaPipelineFilter;
+ if (ssrc_index < ssrcs_received_.size()) {
+ filter_->AddRemoteSSRC(ssrcs_received_[ssrc_index]);
+ } else {
+ MOZ_MTLOG(ML_WARNING, "SelectSsrc called with " << ssrc_index << " but we "
+ << "have only seen " << ssrcs_received_.size()
+ << " ssrcs");
+ }
+}
+
+void MediaPipeline::StateChange(TransportFlow *flow, TransportLayer::State state) {
+ TransportInfo* info = GetTransportInfo_s(flow);
+ MOZ_ASSERT(info);
+
+ if (state == TransportLayer::TS_OPEN) {
+ MOZ_MTLOG(ML_INFO, "Flow is ready");
+ TransportReady_s(*info);
+ } else if (state == TransportLayer::TS_CLOSED ||
+ state == TransportLayer::TS_ERROR) {
+ TransportFailed_s(*info);
+ }
+}
+
+static bool MakeRtpTypeToStringArray(const char** array) {
+ static const char* RTP_str = "RTP";
+ static const char* RTCP_str = "RTCP";
+ static const char* MUX_str = "RTP/RTCP mux";
+ array[MediaPipeline::RTP] = RTP_str;
+ array[MediaPipeline::RTCP] = RTCP_str;
+ array[MediaPipeline::MUX] = MUX_str;
+ return true;
+}
+
+static const char* ToString(MediaPipeline::RtpType type) {
+ static const char* array[(int)MediaPipeline::MAX_RTP_TYPE] = {nullptr};
+ // Dummy variable to cause init to happen only on first call
+ static bool dummy = MakeRtpTypeToStringArray(array);
+ (void)dummy;
+ return array[type];
+}
+
+// Called (on the STS thread) when a transport flow reaches TS_OPEN.
+// Negotiates the DTLS-SRTP cipher, derives the SRTP keys per RFC 5764 S 4.2,
+// creates send/recv SrtpFlow objects, and wires the packet-received signal
+// for the flow's type (RTP, RTCP, or muxed). On any failure the transport
+// info is moved to MP_CLOSED and the RTCP mux state is kept in sync.
+nsresult MediaPipeline::TransportReady_s(TransportInfo &info) {
+  MOZ_ASSERT(!description_.empty());
+
+  // TODO(ekr@rtfm.com): implement some kind of notification on
+  // failure. bug 852665.
+  if (info.state_ != MP_CONNECTING) {
+    MOZ_MTLOG(ML_ERROR, "Transport ready for flow in wrong state:" <<
+              description_ << ": " << ToString(info.type_));
+    return NS_ERROR_FAILURE;
+  }
+
+  MOZ_MTLOG(ML_INFO, "Transport ready for pipeline " <<
+            static_cast<void *>(this) << " flow " << description_ << ": " <<
+            ToString(info.type_));
+
+  // TODO(bcampen@mozilla.com): Should we disconnect from the flow on failure?
+  nsresult res;
+
+  // Now instantiate the SRTP objects
+  TransportLayerDtls *dtls = static_cast<TransportLayerDtls *>(
+      info.transport_->GetLayer(TransportLayerDtls::ID()));
+  MOZ_ASSERT(dtls);  // DTLS is mandatory
+
+  uint16_t cipher_suite;
+  res = dtls->GetSrtpCipher(&cipher_suite);
+  if (NS_FAILED(res)) {
+    MOZ_MTLOG(ML_ERROR, "Failed to negotiate DTLS-SRTP. This is an error");
+    info.state_ = MP_CLOSED;
+    UpdateRtcpMuxState(info);
+    return res;
+  }
+
+  // SRTP Key Exporter as per RFC 5764 S 4.2
+  unsigned char srtp_block[SRTP_TOTAL_KEY_LENGTH * 2];
+  res = dtls->ExportKeyingMaterial(kDTLSExporterLabel, false, "",
+                                   srtp_block, sizeof(srtp_block));
+  if (NS_FAILED(res)) {
+    MOZ_MTLOG(ML_ERROR, "Failed to compute DTLS-SRTP keys. This is an error");
+    info.state_ = MP_CLOSED;
+    UpdateRtcpMuxState(info);
+    MOZ_CRASH();  // TODO: Remove once we have enough field experience to
+                  // know it doesn't happen. bug 798797. Note that the
+                  // code after this never executes.
+    return res;
+  }
+
+  // Slice and dice as per RFC 5764 S 4.2
+  // Exporter layout: client key | server key | client salt | server salt.
+  unsigned char client_write_key[SRTP_TOTAL_KEY_LENGTH];
+  unsigned char server_write_key[SRTP_TOTAL_KEY_LENGTH];
+  int offset = 0;
+  memcpy(client_write_key, srtp_block + offset, SRTP_MASTER_KEY_LENGTH);
+  offset += SRTP_MASTER_KEY_LENGTH;
+  memcpy(server_write_key, srtp_block + offset, SRTP_MASTER_KEY_LENGTH);
+  offset += SRTP_MASTER_KEY_LENGTH;
+  memcpy(client_write_key + SRTP_MASTER_KEY_LENGTH,
+         srtp_block + offset, SRTP_MASTER_SALT_LENGTH);
+  offset += SRTP_MASTER_SALT_LENGTH;
+  memcpy(server_write_key + SRTP_MASTER_KEY_LENGTH,
+         srtp_block + offset, SRTP_MASTER_SALT_LENGTH);
+  offset += SRTP_MASTER_SALT_LENGTH;
+  MOZ_ASSERT(offset == sizeof(srtp_block));
+
+  // Which key we send with depends on our DTLS role (client vs server).
+  unsigned char *write_key;
+  unsigned char *read_key;
+
+  if (dtls->role() == TransportLayerDtls::CLIENT) {
+    write_key = client_write_key;
+    read_key = server_write_key;
+  } else {
+    write_key = server_write_key;
+    read_key = client_write_key;
+  }
+
+  MOZ_ASSERT(!info.send_srtp_ && !info.recv_srtp_);
+  info.send_srtp_ = SrtpFlow::Create(cipher_suite, false, write_key,
+                                     SRTP_TOTAL_KEY_LENGTH);
+  info.recv_srtp_ = SrtpFlow::Create(cipher_suite, true, read_key,
+                                     SRTP_TOTAL_KEY_LENGTH);
+  if (!info.send_srtp_ || !info.recv_srtp_) {
+    MOZ_MTLOG(ML_ERROR, "Couldn't create SRTP flow for "
+              << ToString(info.type_));
+    info.state_ = MP_CLOSED;
+    UpdateRtcpMuxState(info);
+    return NS_ERROR_FAILURE;
+  }
+
+  MOZ_MTLOG(ML_INFO, "Listening for " << ToString(info.type_)
+                     << " packets received on " <<
+                     static_cast<void *>(dtls->downward()));
+
+  // Route incoming packets to the handler matching this flow's type.
+  switch (info.type_) {
+    case RTP:
+      dtls->downward()->SignalPacketReceived.connect(
+          this,
+          &MediaPipeline::RtpPacketReceived);
+      break;
+    case RTCP:
+      dtls->downward()->SignalPacketReceived.connect(
+          this,
+          &MediaPipeline::RtcpPacketReceived);
+      break;
+    case MUX:
+      dtls->downward()->SignalPacketReceived.connect(
+          this,
+          &MediaPipeline::PacketReceived);
+      break;
+    default:
+      MOZ_CRASH();
+  }
+
+  info.state_ = MP_OPEN;
+  UpdateRtcpMuxState(info);
+  return NS_OK;
+}
+
+// Called (on the STS thread) when a transport flow closes or errors.
+// Marks the flow MP_CLOSED and syncs RTCP mux state; full teardown of a
+// previously-working connection is acknowledged as not yet implemented.
+nsresult MediaPipeline::TransportFailed_s(TransportInfo &info) {
+  ASSERT_ON_THREAD(sts_thread_);
+
+  info.state_ = MP_CLOSED;
+  UpdateRtcpMuxState(info);
+
+  MOZ_MTLOG(ML_INFO, "Transport closed for flow " << ToString(info.type_));
+
+  NS_WARNING(
+      "MediaPipeline Transport failed. This is not properly cleaned up yet");
+
+  // TODO(ekr@rtfm.com): SECURITY: Figure out how to clean up if the
+  // connection was good and now it is bad.
+  // TODO(ekr@rtfm.com): Report up so that the PC knows we
+  // have experienced an error.
+
+  return NS_OK;
+}
+
+// When RTP and RTCP are muxed over one flow, mirror the muxed flow's state
+// and SRTP contexts into rtcp_ so RTCP send/receive paths keep working.
+// No-op for non-MUX flows or when the muxed flow isn't rtcp_'s transport.
+void MediaPipeline::UpdateRtcpMuxState(TransportInfo &info) {
+  if (info.type_ == MUX) {
+    if (info.transport_ == rtcp_.transport_) {
+      rtcp_.state_ = info.state_;
+      if (!rtcp_.send_srtp_) {
+        rtcp_.send_srtp_ = info.send_srtp_;
+        rtcp_.recv_srtp_ = info.recv_srtp_;
+      }
+    }
+  }
+}
+
+// Writes an (already SRTP-protected) packet directly below the DTLS layer of
+// `flow`. Blocking (TE_WOULDBLOCK) is treated as success (packet dropped);
+// any other short write is reported as a closed stream.
+nsresult MediaPipeline::SendPacket(TransportFlow *flow, const void *data,
+                                   int len) {
+  ASSERT_ON_THREAD(sts_thread_);
+
+  // Note that we bypass the DTLS layer here
+  TransportLayerDtls *dtls = static_cast<TransportLayerDtls *>(
+      flow->GetLayer(TransportLayerDtls::ID()));
+  MOZ_ASSERT(dtls);
+
+  TransportResult res = dtls->downward()->
+      SendPacket(static_cast<const unsigned char *>(data), len);
+
+  if (res != len) {
+    // Ignore blocking indications
+    if (res == TE_WOULDBLOCK)
+      return NS_OK;
+
+    MOZ_MTLOG(ML_ERROR, "Failed write on stream " << description_);
+    return NS_BASE_STREAM_CLOSED;
+  }
+
+  return NS_OK;
+}
+
+// Bumps RTP send counters; logs a summary every 100 packets to keep log
+// volume bounded.
+void MediaPipeline::increment_rtp_packets_sent(int32_t bytes) {
+  ++rtp_packets_sent_;
+  rtp_bytes_sent_ += bytes;
+
+  if (!(rtp_packets_sent_ % 100)) {
+    MOZ_MTLOG(ML_INFO, "RTP sent packet count for " << description_
+              << " Pipeline " << static_cast<void *>(this)
+              << " Flow : " << static_cast<void *>(rtp_.transport_)
+              << ": " << rtp_packets_sent_
+              << " (" << rtp_bytes_sent_ << " bytes)");
+  }
+}
+
+// Bumps the RTCP send counter; logs a summary every 100 packets.
+void MediaPipeline::increment_rtcp_packets_sent() {
+  ++rtcp_packets_sent_;
+  if (!(rtcp_packets_sent_ % 100)) {
+    MOZ_MTLOG(ML_INFO, "RTCP sent packet count for " << description_
+              << " Pipeline " << static_cast<void *>(this)
+              << " Flow : " << static_cast<void *>(rtcp_.transport_)
+              << ": " << rtcp_packets_sent_);
+  }
+}
+
+// Bumps RTP receive counters; logs a summary every 100 packets.
+void MediaPipeline::increment_rtp_packets_received(int32_t bytes) {
+  ++rtp_packets_received_;
+  rtp_bytes_received_ += bytes;
+  if (!(rtp_packets_received_ % 100)) {
+    MOZ_MTLOG(ML_INFO, "RTP received packet count for " << description_
+              << " Pipeline " << static_cast<void *>(this)
+              << " Flow : " << static_cast<void *>(rtp_.transport_)
+              << ": " << rtp_packets_received_
+              << " (" << rtp_bytes_received_ << " bytes)");
+  }
+}
+
+// Bumps the RTCP receive counter; logs a summary every 100 packets.
+void MediaPipeline::increment_rtcp_packets_received() {
+  ++rtcp_packets_received_;
+  if (!(rtcp_packets_received_ % 100)) {
+    MOZ_MTLOG(ML_INFO, "RTCP received packet count for " << description_
+              << " Pipeline " << static_cast<void *>(this)
+              << " Flow : " << static_cast<void *>(rtcp_.transport_)
+              << ": " << rtcp_packets_received_);
+  }
+}
+
+// Incoming RTP packet handler. Runs a series of drop checks (detached
+// transport, no conduit, pipeline/transport not open, transmit-only
+// pipeline, empty or non-RTP payload), records newly-seen SSRCs, applies
+// the optional filter, unprotects SRTP in a private copy, and forwards the
+// plaintext RTP to the conduit.
+void MediaPipeline::RtpPacketReceived(TransportLayer *layer,
+                                      const unsigned char *data,
+                                      size_t len) {
+  if (!transport_->pipeline()) {
+    MOZ_MTLOG(ML_ERROR, "Discarding incoming packet; transport disconnected");
+    return;
+  }
+
+  if (!conduit_) {
+    MOZ_MTLOG(ML_DEBUG, "Discarding incoming packet; media disconnected");
+    return;
+  }
+
+  if (rtp_.state_ != MP_OPEN) {
+    MOZ_MTLOG(ML_ERROR, "Discarding incoming packet; pipeline not open");
+    return;
+  }
+
+  if (rtp_.transport_->state() != TransportLayer::TS_OPEN) {
+    MOZ_MTLOG(ML_ERROR, "Discarding incoming packet; transport not open");
+    return;
+  }
+
+  // This should never happen.
+  MOZ_ASSERT(rtp_.recv_srtp_);
+
+  if (direction_ == TRANSMIT) {
+    return;
+  }
+
+  if (!len) {
+    return;
+  }
+
+  // Filter out everything but RTP/RTCP
+  // (first byte of RTP/RTCP with version 2 lies in [128, 191]).
+  if (data[0] < 128 || data[0] > 191) {
+    return;
+  }
+
+  webrtc::RTPHeader header;
+  if (!rtp_parser_->Parse(data, len, &header)) {
+    return;
+  }
+
+  // Remember every distinct remote SSRC so SelectSsrc_s can pick one later.
+  if (std::find(ssrcs_received_.begin(), ssrcs_received_.end(), header.ssrc) ==
+      ssrcs_received_.end()) {
+    ssrcs_received_.push_back(header.ssrc);
+  }
+
+  if (filter_ && !filter_->Filter(header)) {
+    return;
+  }
+
+  // Make a copy rather than cast away constness
+  // (libsrtp decrypts in place).
+  auto inner_data = MakeUnique<unsigned char[]>(len);
+  memcpy(inner_data.get(), data, len);
+  int out_len = 0;
+  nsresult res = rtp_.recv_srtp_->UnprotectRtp(inner_data.get(),
+                                               len, len, &out_len);
+  if (!NS_SUCCEEDED(res)) {
+    char tmp[16];
+
+    SprintfLiteral(tmp, "%.2x %.2x %.2x %.2x",
+                   inner_data[0],
+                   inner_data[1],
+                   inner_data[2],
+                   inner_data[3]);
+
+    MOZ_MTLOG(ML_NOTICE, "Error unprotecting RTP in " << description_
+              << "len= " << len << "[" << tmp << "...]");
+
+    return;
+  }
+  MOZ_MTLOG(ML_DEBUG, description_ << " received RTP packet.");
+  increment_rtp_packets_received(out_len);
+
+  (void)conduit_->ReceivedRTPPacket(inner_data.get(), out_len);  // Ignore error codes
+}
+
+// Incoming RTCP packet handler. Mirrors RtpPacketReceived: drop checks,
+// optional sender-report filtering for receive pipelines, SRTCP unprotect
+// in a private copy, then hand the plaintext to the conduit.
+void MediaPipeline::RtcpPacketReceived(TransportLayer *layer,
+                                      const unsigned char *data,
+                                      size_t len) {
+  if (!transport_->pipeline()) {
+    MOZ_MTLOG(ML_DEBUG, "Discarding incoming packet; transport disconnected");
+    return;
+  }
+
+  if (!conduit_) {
+    MOZ_MTLOG(ML_DEBUG, "Discarding incoming packet; media disconnected");
+    return;
+  }
+
+  if (rtcp_.state_ != MP_OPEN) {
+    MOZ_MTLOG(ML_DEBUG, "Discarding incoming packet; pipeline not open");
+    return;
+  }
+
+  if (rtcp_.transport_->state() != TransportLayer::TS_OPEN) {
+    MOZ_MTLOG(ML_ERROR, "Discarding incoming packet; transport not open");
+    return;
+  }
+
+  if (!len) {
+    return;
+  }
+
+  // Filter out everything but RTP/RTCP
+  // (first byte of RTP/RTCP with version 2 lies in [128, 191]).
+  if (data[0] < 128 || data[0] > 191) {
+    return;
+  }
+
+  // We do not filter RTCP for send pipelines, since the webrtc.org code for
+  // senders already has logic to ignore RRs that do not apply.
+  // TODO bug 1279153: remove SR check for reduced size RTCP
+  if (filter_ && direction_ == RECEIVE) {
+    if (!filter_->FilterSenderReport(data, len)) {
+      MOZ_MTLOG(ML_NOTICE, "Dropping incoming RTCP packet; filtered out");
+      return;
+    }
+  }
+
+  // Check the SRTCP context before we dereference it below (previously this
+  // assert sat after the UnprotectRtcp call, i.e. after the first use).
+  MOZ_ASSERT(rtcp_.recv_srtp_);  // This should never happen
+
+  // Make a copy rather than cast away constness
+  // (libsrtp decrypts in place).
+  auto inner_data = MakeUnique<unsigned char[]>(len);
+  memcpy(inner_data.get(), data, len);
+  int out_len = 0;
+
+  nsresult res = rtcp_.recv_srtp_->UnprotectRtcp(inner_data.get(),
+                                                 len,
+                                                 len,
+                                                 &out_len);
+
+  if (!NS_SUCCEEDED(res)) {
+    // Log the failure (the RTP path logs its unprotect failures too).
+    MOZ_MTLOG(ML_NOTICE, "Error unprotecting RTCP in " << description_
+              << " len= " << len);
+    return;
+  }
+
+  MOZ_MTLOG(ML_DEBUG, description_ << " received RTCP packet.");
+  increment_rtcp_packets_received();
+
+  (void)conduit_->ReceivedRTCPPacket(inner_data.get(), out_len);  // Ignore error codes
+}
+
+// Heuristic demux for a muxed RTP/RTCP flow: returns true if the packet
+// looks like RTP, false if it looks like RTCP. Classification keys off the
+// second byte (RTP payload type / RTCP packet type), matching the type list
+// used by webrtc.org's rtp_utility.cc.
+bool MediaPipeline::IsRtp(const unsigned char *data, size_t len) {
+  // Too short to carry either header; treat as not-RTP.
+  if (len < 2)
+    return false;
+
+  // Check if this is a RTCP packet. Logic based on the types listed in
+  // media/webrtc/trunk/src/modules/rtp_rtcp/source/rtp_utility.cc
+
+  // Anything outside this range is RTP.
+  if ((data[1] < 192) || (data[1] > 207))
+    return true;
+
+  if (data[1] == 192)  // FIR
+    return false;
+
+  if (data[1] == 193)  // NACK, but could also be RTP. This makes us sad
+    return true;       // but it's how webrtc.org behaves.
+
+  if (data[1] == 194)
+    return true;
+
+  if (data[1] == 195)  // IJ.
+    return false;
+
+  if ((data[1] > 195) && (data[1] < 200))  // the > 195 is redundant
+    return true;
+
+  if ((data[1] >= 200) && (data[1] <= 207))  // SR, RR, SDES, BYE,
+    return false;                            // APP, RTPFB, PSFB, XR
+
+  MOZ_ASSERT(false);  // Not reached, belt and suspenders.
+  return true;
+}
+
+// Handler for muxed flows: classifies each packet with IsRtp() and
+// dispatches to the RTP or RTCP handler accordingly.
+void MediaPipeline::PacketReceived(TransportLayer *layer,
+                                   const unsigned char *data,
+                                   size_t len) {
+  if (!transport_->pipeline()) {
+    MOZ_MTLOG(ML_DEBUG, "Discarding incoming packet; transport disconnected");
+    return;
+  }
+
+  if (IsRtp(data, len)) {
+    RtpPacketReceived(layer, data, len);
+  } else {
+    RtcpPacketReceived(layer, data, len);
+  }
+}
+
+// Listener attached to the local MediaStreamTrack being transmitted.
+// Receives media data (directly from the source when possible, otherwise
+// via the queued-changes path) and forwards audio chunks to an
+// AudioProxyThread or video frames to a VideoFrameConverter, which feed the
+// conduit. Destruction is careful to release the conduit on the main thread.
+class MediaPipelineTransmit::PipelineListener
+  : public DirectMediaStreamTrackListener
+{
+friend class MediaPipelineTransmit;
+public:
+  explicit PipelineListener(const RefPtr<MediaSessionConduit>& conduit)
+    : conduit_(conduit),
+      track_id_(TRACK_INVALID),
+      mMutex("MediaPipelineTransmit::PipelineListener"),
+      track_id_external_(TRACK_INVALID),
+      active_(false),
+      enabled_(false),
+      direct_connect_(false)
+  {
+  }
+
+  ~PipelineListener()
+  {
+    if (!NS_IsMainThread()) {
+      // release conduit on mainthread. Must use forget()!
+      nsresult rv = NS_DispatchToMainThread(new
+                                            ConduitDeleteEvent(conduit_.forget()));
+      MOZ_ASSERT(!NS_FAILED(rv),"Could not dispatch conduit shutdown to main");
+      if (NS_FAILED(rv)) {
+        MOZ_CRASH();
+      }
+    } else {
+      conduit_ = nullptr;
+    }
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+    if (converter_) {
+      converter_->Shutdown();
+    }
+#endif
+  }
+
+  // Dispatches setting the internal TrackID to TRACK_INVALID to the media
+  // graph thread to keep it in sync with other MediaStreamGraph operations
+  // like RemoveListener() and AddListener(). The TrackID will be updated on
+  // the next NewData() callback.
+  void UnsetTrackId(MediaStreamGraphImpl* graph);
+
+  // active: a transport exists to send on. enabled: content (not
+  // black/silence) may be sent. Both are atomics set from other threads.
+  void SetActive(bool active) { active_ = active; }
+  void SetEnabled(bool enabled) { enabled_ = enabled; }
+
+  // These are needed since nested classes don't have access to any particular
+  // instance of the parent
+  void SetAudioProxy(const RefPtr<AudioProxyThread>& proxy)
+  {
+    audio_processing_ = proxy;
+  }
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+  void SetVideoFrameConverter(const RefPtr<VideoFrameConverter>& converter)
+  {
+    converter_ = converter;
+  }
+
+  // Forwards a converted raw video frame to the VideoSessionConduit.
+  void OnVideoFrameConverted(unsigned char* aVideoFrame,
+                             unsigned int aVideoFrameLength,
+                             unsigned short aWidth,
+                             unsigned short aHeight,
+                             VideoType aVideoType,
+                             uint64_t aCaptureTime)
+  {
+    MOZ_ASSERT(conduit_->type() == MediaSessionConduit::VIDEO);
+    static_cast<VideoSessionConduit*>(conduit_.get())->SendVideoFrame(
+      aVideoFrame, aVideoFrameLength, aWidth, aHeight, aVideoType, aCaptureTime);
+  }
+
+  // Forwards a converted I420 frame to the VideoSessionConduit.
+  void OnVideoFrameConverted(webrtc::I420VideoFrame& aVideoFrame)
+  {
+    MOZ_ASSERT(conduit_->type() == MediaSessionConduit::VIDEO);
+    static_cast<VideoSessionConduit*>(conduit_.get())->SendVideoFrame(aVideoFrame);
+  }
+#endif
+
+  // Implement MediaStreamTrackListener
+  void NotifyQueuedChanges(MediaStreamGraph* aGraph,
+                           StreamTime aTrackOffset,
+                           const MediaSegment& aQueuedMedia) override;
+
+  // Implement DirectMediaStreamTrackListener
+  void NotifyRealtimeTrackData(MediaStreamGraph* aGraph,
+                               StreamTime aTrackOffset,
+                               const MediaSegment& aMedia) override;
+  void NotifyDirectListenerInstalled(InstallationResult aResult) override;
+  void NotifyDirectListenerUninstalled() override;
+
+private:
+  // Runs on the MediaStreamGraph thread (see UnsetTrackId).
+  void UnsetTrackIdImpl() {
+    MutexAutoLock lock(mMutex);
+    track_id_ = track_id_external_ = TRACK_INVALID;
+  }
+
+  // Common entry for both the direct and queued data paths.
+  void NewData(MediaStreamGraph* graph,
+               StreamTime offset,
+               const MediaSegment& media);
+
+  RefPtr<MediaSessionConduit> conduit_;
+  RefPtr<AudioProxyThread> audio_processing_;
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+  RefPtr<VideoFrameConverter> converter_;
+#endif
+
+  // May be TRACK_INVALID until we see data from the track
+  TrackID track_id_; // this is the current TrackID this listener is attached to
+  Mutex mMutex;
+  // protected by mMutex
+  // May be TRACK_INVALID until we see data from the track
+  TrackID track_id_external_; // this is queried from other threads
+
+  // active is true if there is a transport to send on
+  mozilla::Atomic<bool> active_;
+  // enabled is true if the media access control permits sending
+  // actual content; when false you get black/silence
+  mozilla::Atomic<bool> enabled_;
+
+  // Written and read on the MediaStreamGraph thread
+  bool direct_connect_;
+};
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+// Implements VideoConverterListener for MediaPipeline.
+//
+// We pass converted frames on to MediaPipelineTransmit::PipelineListener
+// where they are further forwarded to VideoConduit.
+// MediaPipelineTransmit calls Detach() during shutdown to ensure there is
+// no cyclic dependencies between us and PipelineListener.
+class MediaPipelineTransmit::VideoFrameFeeder
+  : public VideoConverterListener
+{
+public:
+  explicit VideoFrameFeeder(const RefPtr<PipelineListener>& listener)
+    : listener_(listener),
+      mutex_("VideoFrameFeeder")
+  {
+    MOZ_COUNT_CTOR(VideoFrameFeeder);
+  }
+
+  // Drops the listener reference under the lock so in-flight callbacks
+  // become no-ops from here on.
+  void Detach()
+  {
+    MutexAutoLock lock(mutex_);
+
+    listener_ = nullptr;
+  }
+
+  void OnVideoFrameConverted(unsigned char* aVideoFrame,
+                             unsigned int aVideoFrameLength,
+                             unsigned short aWidth,
+                             unsigned short aHeight,
+                             VideoType aVideoType,
+                             uint64_t aCaptureTime) override
+  {
+    MutexAutoLock lock(mutex_);
+
+    if (!listener_) {
+      return;
+    }
+
+    listener_->OnVideoFrameConverted(aVideoFrame, aVideoFrameLength,
+                                     aWidth, aHeight, aVideoType, aCaptureTime);
+  }
+
+  void OnVideoFrameConverted(webrtc::I420VideoFrame& aVideoFrame) override
+  {
+    MutexAutoLock lock(mutex_);
+
+    if (!listener_) {
+      return;
+    }
+
+    listener_->OnVideoFrameConverted(aVideoFrame);
+  }
+
+protected:
+  virtual ~VideoFrameFeeder()
+  {
+    MOZ_COUNT_DTOR(VideoFrameFeeder);
+  }
+
+  RefPtr<PipelineListener> listener_;
+  Mutex mutex_;
+};
+#endif
+
+// Video sink attached to the transmitted track; SetCurrentFrames() (defined
+// below) queues video chunks into the PipelineListener's converter.
+// NOTE(review): pipelineListener_ is a raw pointer — lifetime presumably
+// managed by MediaPipelineTransmit, which owns both; confirm against callers.
+class MediaPipelineTransmit::PipelineVideoSink :
+  public MediaStreamVideoSink
+{
+public:
+  explicit PipelineVideoSink(const RefPtr<MediaSessionConduit>& conduit,
+                             MediaPipelineTransmit::PipelineListener* listener)
+    : conduit_(conduit)
+    , pipelineListener_(listener)
+  {
+  }
+
+  virtual void SetCurrentFrames(const VideoSegment& aSegment) override;
+  virtual void ClearFrames() override {}
+
+private:
+  ~PipelineVideoSink() {
+    // release conduit on mainthread. Must use forget()!
+    nsresult rv = NS_DispatchToMainThread(new
+      ConduitDeleteEvent(conduit_.forget()));
+    MOZ_ASSERT(!NS_FAILED(rv),"Could not dispatch conduit shutdown to main");
+    if (NS_FAILED(rv)) {
+      MOZ_CRASH();
+    }
+  }
+  RefPtr<MediaSessionConduit> conduit_;
+  MediaPipelineTransmit::PipelineListener* pipelineListener_;
+};
+
+// Constructs the transmit pipeline for one local track: creates the
+// PipelineListener and PipelineVideoSink, then wires either an audio proxy
+// thread (audio) or a frame converter + feeder chain (video) to the conduit.
+MediaPipelineTransmit::MediaPipelineTransmit(
+    const std::string& pc,
+    nsCOMPtr<nsIEventTarget> main_thread,
+    nsCOMPtr<nsIEventTarget> sts_thread,
+    dom::MediaStreamTrack* domtrack,
+    const std::string& track_id,
+    int level,
+    RefPtr<MediaSessionConduit> conduit,
+    RefPtr<TransportFlow> rtp_transport,
+    RefPtr<TransportFlow> rtcp_transport,
+    nsAutoPtr<MediaPipelineFilter> filter) :
+  MediaPipeline(pc, TRANSMIT, main_thread, sts_thread, track_id, level,
+                conduit, rtp_transport, rtcp_transport, filter),
+  listener_(new PipelineListener(conduit)),
+  video_sink_(new PipelineVideoSink(conduit, listener_)),
+  domtrack_(domtrack)
+{
+  if (!IsVideo()) {
+    audio_processing_ = MakeAndAddRef<AudioProxyThread>(static_cast<AudioSessionConduit*>(conduit.get()));
+    listener_->SetAudioProxy(audio_processing_);
+  }
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+  else { // Video
+    // For video we send frames to an async VideoFrameConverter that calls
+    // back to a VideoFrameFeeder that feeds I420 frames to VideoConduit.
+
+    feeder_ = MakeAndAddRef<VideoFrameFeeder>(listener_);
+
+    converter_ = MakeAndAddRef<VideoFrameConverter>();
+    converter_->AddListener(feeder_);
+
+    listener_->SetVideoFrameConverter(converter_);
+  }
+#endif
+}
+
+// Destructor: detaches the feeder from the listener to break the
+// feeder -> listener reference (see VideoFrameFeeder comment above it).
+MediaPipelineTransmit::~MediaPipelineTransmit()
+{
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+  if (feeder_) {
+    feeder_->Detach();
+  }
+#endif
+}
+
+// Attaches the listeners to the track, then runs base-class init
+// (which sets up the transports).
+nsresult MediaPipelineTransmit::Init() {
+  AttachToTrack(track_id_);
+
+  return MediaPipeline::Init();
+}
+
+// Main-thread: builds the log description for this pipeline and registers
+// the pipeline's listeners on the DOM track (direct listener when
+// available, with a queued listener as fallback).
+void MediaPipelineTransmit::AttachToTrack(const std::string& track_id) {
+  ASSERT_ON_THREAD(main_thread_);
+
+  description_ = pc_ + "| ";
+  description_ += conduit_->type() == MediaSessionConduit::AUDIO ?
+      "Transmit audio[" : "Transmit video[";
+  description_ += track_id;
+  description_ += "]";
+
+  // TODO(ekr@rtfm.com): Check for errors
+  MOZ_MTLOG(ML_DEBUG, "Attaching pipeline to track "
+            << static_cast<void *>(domtrack_) << " conduit type=" <<
+            (conduit_->type() == MediaSessionConduit::AUDIO ?"audio":"video"));
+
+  // Register the Listener directly with the source if we can.
+  // We also register it as a non-direct listener so we fall back to that
+  // if installing the direct listener fails. As a direct listener we get access
+  // to direct unqueued (and not resampled) data.
+  domtrack_->AddDirectListener(listener_);
+  domtrack_->AddListener(listener_);
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+  domtrack_->AddDirectListener(video_sink_);
+#endif
+
+#ifndef MOZILLA_INTERNAL_API
+  // this enables the unit tests that can't fiddle with principals and the like
+  listener_->SetEnabled(true);
+#endif
+}
+
+// True when the attached DOM track is a video track.
+bool
+MediaPipelineTransmit::IsVideo() const
+{
+  return !!domtrack_->AsVideoStreamTrack();
+}
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+// Main-thread: re-evaluates whether real content may be sent for this
+// pipeline's track after a principal/identity change. A null `track` means
+// the change applies to all tracks of the PeerConnection.
+void MediaPipelineTransmit::UpdateSinkIdentity_m(MediaStreamTrack* track,
+                                                 nsIPrincipal* principal,
+                                                 const PeerIdentity* sinkIdentity) {
+  ASSERT_ON_THREAD(main_thread_);
+
+  if (track != nullptr && track != domtrack_) {
+    // If a track is specified, then it might not be for this pipeline,
+    // since we receive notifications for all tracks on the PC.
+    // nullptr means that the PeerIdentity has changed and shall be applied
+    // to all tracks of the PC.
+    return;
+  }
+
+  bool enableTrack = principal->Subsumes(domtrack_->GetPrincipal());
+  if (!enableTrack) {
+    // first try didn't work, but there's a chance that this is still available
+    // if our track is bound to a peerIdentity, and the peer connection (our
+    // sink) is bound to the same identity, then we can enable the track.
+    const PeerIdentity* trackIdentity = domtrack_->GetPeerIdentity();
+    if (sinkIdentity && trackIdentity) {
+      enableTrack = (*sinkIdentity == *trackIdentity);
+    }
+  }
+
+  // When disabled, the listener substitutes black/silence for content.
+  listener_->SetEnabled(enableTrack);
+}
+#endif
+
+// Main-thread: unhooks all listeners from the DOM track and drops our
+// reference to it. Safe to call more than once (domtrack_ is nulled).
+// NOTE(review): video_sink_ is removed unconditionally here although
+// AttachToTrack only adds it under !MOZILLA_EXTERNAL_LINKAGE — presumably
+// a harmless no-op removal in the external-linkage build; confirm.
+void
+MediaPipelineTransmit::DetachMedia()
+{
+  ASSERT_ON_THREAD(main_thread_);
+  if (domtrack_) {
+    domtrack_->RemoveDirectListener(listener_);
+    domtrack_->RemoveListener(listener_);
+    domtrack_->RemoveDirectListener(video_sink_);
+    domtrack_ = nullptr;
+  }
+  // Let the listener be destroyed with the pipeline (or later).
+}
+
+// STS-thread: runs base-class transport setup, then activates the listener
+// once the RTP flow (not the RTCP flow) is ready so sending can start.
+// Previously the base-class result was ignored and the listener could be
+// activated even when SRTP setup had failed; now failures are propagated.
+nsresult MediaPipelineTransmit::TransportReady_s(TransportInfo &info) {
+  ASSERT_ON_THREAD(sts_thread_);
+  // Call base ready function.
+  nsresult rv = MediaPipeline::TransportReady_s(info);
+  if (NS_FAILED(rv)) {
+    return rv;
+  }
+
+  // Only the RTP flow gates sending; RTCP readiness does not.
+  if (&info == &rtp_) {
+    listener_->SetActive(true);
+  }
+
+  return NS_OK;
+}
+
+// Main-thread: swaps the pipeline onto a different DOM track (RTCRtpSender
+// replaceTrack): detach from the old track, reset the listener's TrackID on
+// the graph thread, then attach to the new one.
+nsresult MediaPipelineTransmit::ReplaceTrack(MediaStreamTrack& domtrack) {
+  // MainThread, checked in calls we make
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+  nsString nsTrackId;
+  domtrack.GetId(nsTrackId);
+  std::string track_id(NS_ConvertUTF16toUTF8(nsTrackId).get());
+#else
+  std::string track_id = domtrack.GetId();
+#endif
+  MOZ_MTLOG(ML_DEBUG, "Reattaching pipeline " << description_ << " to track "
+            << static_cast<void *>(&domtrack)
+            << " track " << track_id << " conduit type=" <<
+            (conduit_->type() == MediaSessionConduit::AUDIO ?"audio":"video"));
+
+  DetachMedia();
+  domtrack_ = &domtrack; // Detach clears it
+  // Unsets the track id after RemoveListener() takes effect.
+  listener_->UnsetTrackId(domtrack_->GraphImpl());
+  track_id_ = track_id;
+  AttachToTrack(track_id);
+  return NS_OK;
+}
+
+// STS-thread: disconnects this pipeline's signal handlers from a transport
+// flow (state changes and packet delivery below the DTLS layer).
+void MediaPipeline::DisconnectTransport_s(TransportInfo &info) {
+  MOZ_ASSERT(info.transport_);
+  ASSERT_ON_THREAD(sts_thread_);
+
+  info.transport_->SignalStateChange.disconnect(this);
+  // We do this even if we're a transmitter, since we are still possibly
+  // registered to receive RTCP.
+  TransportLayerDtls *dtls = static_cast<TransportLayerDtls *>(
+      info.transport_->GetLayer(TransportLayerDtls::ID()));
+  MOZ_ASSERT(dtls);  // DTLS is mandatory
+  MOZ_ASSERT(dtls->downward());
+  dtls->downward()->SignalPacketReceived.disconnect(this);
+}
+
+// STS-thread: hooks this pipeline up to a transport flow. If the flow is
+// already open, completes setup immediately; if it is already errored,
+// fails; otherwise subscribes for state changes and waits.
+nsresult MediaPipeline::ConnectTransport_s(TransportInfo &info) {
+  MOZ_ASSERT(info.transport_);
+  ASSERT_ON_THREAD(sts_thread_);
+
+  // Look to see if the transport is ready
+  if (info.transport_->state() == TransportLayer::TS_OPEN) {
+    nsresult res = TransportReady_s(info);
+    if (NS_FAILED(res)) {
+      MOZ_MTLOG(ML_ERROR, "Error calling TransportReady(); res="
+                << static_cast<uint32_t>(res) << " in " << __FUNCTION__);
+      return res;
+    }
+  } else if (info.transport_->state() == TransportLayer::TS_ERROR) {
+    // Leading space added: without it the log read e.g.
+    // "RTPtransport is already in error state".
+    MOZ_MTLOG(ML_ERROR, ToString(info.type_)
+              << " transport is already in error state");
+    TransportFailed_s(info);
+    return NS_ERROR_FAILURE;
+  }
+
+  info.transport_->SignalStateChange.connect(this,
+                                             &MediaPipeline::StateChange);
+
+  return NS_OK;
+}
+
+// STS-thread: maps a TransportFlow back to this pipeline's rtp_ or rtcp_
+// TransportInfo; returns nullptr if the flow belongs to neither.
+MediaPipeline::TransportInfo* MediaPipeline::GetTransportInfo_s(
+    TransportFlow *flow) {
+  ASSERT_ON_THREAD(sts_thread_);
+  if (flow == rtp_.transport_) {
+    return &rtp_;
+  }
+
+  if (flow == rtcp_.transport_) {
+    return &rtcp_;
+  }
+
+  return nullptr;
+}
+
+// Conduit-facing send API (any thread): copies the RTP packet into a buffer
+// sized for in-place SRTP expansion and dispatches the actual protect+send
+// to the STS thread. Always reports success to the caller.
+nsresult MediaPipeline::PipelineTransport::SendRtpPacket(
+    const void *data, int len) {
+
+    nsAutoPtr<DataBuffer> buf(new DataBuffer(static_cast<const uint8_t *>(data),
+                                             len, len + SRTP_MAX_EXPANSION));
+
+    RUN_ON_THREAD(sts_thread_,
+                  WrapRunnable(
+                      RefPtr<MediaPipeline::PipelineTransport>(this),
+                      &MediaPipeline::PipelineTransport::SendRtpRtcpPacket_s,
+                      buf, true),
+                  NS_DISPATCH_NORMAL);
+
+    return NS_OK;
+}
+
+// STS-thread worker shared by SendRtpPacket/SendRtcpPacket: SRTP-protects
+// the packet in place (libsrtp expands in the same buffer) and writes it to
+// the matching transport. Quietly succeeds when the pipeline has been
+// detached or SRTP isn't set up yet (packet is dropped).
+nsresult MediaPipeline::PipelineTransport::SendRtpRtcpPacket_s(
+    nsAutoPtr<DataBuffer> data,
+    bool is_rtp) {
+
+  ASSERT_ON_THREAD(sts_thread_);
+  if (!pipeline_) {
+    return NS_OK;  // Detached
+  }
+  TransportInfo& transport = is_rtp ? pipeline_->rtp_ : pipeline_->rtcp_;
+
+  if (!transport.send_srtp_) {
+    MOZ_MTLOG(ML_DEBUG, "Couldn't write RTP/RTCP packet; SRTP not set up yet");
+    return NS_OK;
+  }
+
+  MOZ_ASSERT(transport.transport_);
+  NS_ENSURE_TRUE(transport.transport_, NS_ERROR_NULL_POINTER);
+
+  // libsrtp enciphers in place, so we need a big enough buffer.
+  MOZ_ASSERT(data->capacity() >= data->len() + SRTP_MAX_EXPANSION);
+
+  int out_len;
+  nsresult res;
+  if (is_rtp) {
+    res = transport.send_srtp_->ProtectRtp(data->data(),
+                                           data->len(),
+                                           data->capacity(),
+                                           &out_len);
+  } else {
+    res = transport.send_srtp_->ProtectRtcp(data->data(),
+                                            data->len(),
+                                            data->capacity(),
+                                            &out_len);
+  }
+  if (!NS_SUCCEEDED(res)) {
+    return res;
+  }
+
+  // paranoia; don't have uninitialized bytes included in data->len()
+  data->SetLength(out_len);
+
+  MOZ_MTLOG(ML_DEBUG, pipeline_->description_ << " sending " <<
+            (is_rtp ? "RTP" : "RTCP") << " packet");
+  if (is_rtp) {
+    pipeline_->increment_rtp_packets_sent(out_len);
+  } else {
+    pipeline_->increment_rtcp_packets_sent();
+  }
+  return pipeline_->SendPacket(transport.transport_, data->data(), out_len);
+}
+
+// Conduit-facing send API (any thread): RTCP twin of SendRtpPacket — copy
+// into an expansion-sized buffer and hand off to the STS thread.
+nsresult MediaPipeline::PipelineTransport::SendRtcpPacket(
+    const void *data, int len) {
+
+    nsAutoPtr<DataBuffer> buf(new DataBuffer(static_cast<const uint8_t *>(data),
+                                             len, len + SRTP_MAX_EXPANSION));
+
+    RUN_ON_THREAD(sts_thread_,
+                  WrapRunnable(
+                      RefPtr<MediaPipeline::PipelineTransport>(this),
+                      &MediaPipeline::PipelineTransport::SendRtpRtcpPacket_s,
+                      buf, false),
+                  NS_DISPATCH_NORMAL);
+
+    return NS_OK;
+}
+
+// Resets track_id_ to TRACK_INVALID on the MediaStreamGraph thread (via a
+// ControlMessage) so it stays ordered with Add/RemoveListener operations;
+// in the fake-stream test build there is no graph, so reset synchronously.
+void MediaPipelineTransmit::PipelineListener::
+UnsetTrackId(MediaStreamGraphImpl* graph) {
+#ifndef USE_FAKE_MEDIA_STREAMS
+  class Message : public ControlMessage {
+  public:
+    explicit Message(PipelineListener* listener) :
+      ControlMessage(nullptr), listener_(listener) {}
+    virtual void Run() override
+    {
+      listener_->UnsetTrackIdImpl();
+    }
+    RefPtr<PipelineListener> listener_;
+  };
+  graph->AppendMessage(MakeUnique<Message>(this));
+#else
+  UnsetTrackIdImpl();
+#endif
+}
+// Called if we're attached with AddDirectListener() — unqueued, unresampled
+// data straight from the source. Forwards into the common NewData() path.
+void MediaPipelineTransmit::PipelineListener::
+NotifyRealtimeTrackData(MediaStreamGraph* graph,
+                        StreamTime offset,
+                        const MediaSegment& media) {
+  MOZ_MTLOG(ML_DEBUG, "MediaPipeline::NotifyRealtimeTrackData() listener=" <<
+                      this << ", offset=" << offset <<
+                      ", duration=" << media.GetDuration());
+
+  NewData(graph, offset, media);
+}
+
+// Queued (non-direct) data path; used only as a fallback when the direct
+// listener is not installed, to avoid feeding the same media twice.
+void MediaPipelineTransmit::PipelineListener::
+NotifyQueuedChanges(MediaStreamGraph* graph,
+                    StreamTime offset,
+                    const MediaSegment& queued_media) {
+  MOZ_MTLOG(ML_DEBUG, "MediaPipeline::NotifyQueuedChanges()");
+
+  // ignore non-direct data if we're also getting direct data
+  if (!direct_connect_) {
+    NewData(graph, offset, queued_media);
+  }
+}
+
+// Records whether the direct-listener install succeeded; this flag makes
+// NotifyQueuedChanges() drop duplicate (queued) data.
+void MediaPipelineTransmit::PipelineListener::
+NotifyDirectListenerInstalled(InstallationResult aResult) {
+  MOZ_MTLOG(ML_INFO, "MediaPipeline::NotifyDirectListenerInstalled() listener= " <<
+                     this << ", result=" << static_cast<int32_t>(aResult));
+
+  direct_connect_ = InstallationResult::SUCCESS == aResult;
+}
+
+// Direct listener removed; fall back to the queued data path.
+void MediaPipelineTransmit::PipelineListener::
+NotifyDirectListenerUninstalled() {
+  MOZ_MTLOG(ML_INFO, "MediaPipeline::NotifyDirectListenerUninstalled() listener=" << this);
+
+  direct_connect_ = false;
+}
+
+// Common sink for both direct and queued media. Drops data until a
+// transport is active, sanity-checks the media kind against the conduit,
+// and queues audio chunks onto the audio processing thread. Video is
+// ignored here — it arrives via PipelineVideoSink::SetCurrentFrames.
+void MediaPipelineTransmit::PipelineListener::
+NewData(MediaStreamGraph* graph,
+        StreamTime offset,
+        const MediaSegment& media) {
+  if (!active_) {
+    MOZ_MTLOG(ML_DEBUG, "Discarding packets because transport not ready");
+    return;
+  }
+
+  if (conduit_->type() !=
+      (media.GetType() == MediaSegment::AUDIO ? MediaSessionConduit::AUDIO :
+                                                MediaSessionConduit::VIDEO)) {
+    MOZ_ASSERT(false, "The media type should always be correct since the "
+                      "listener is locked to a specific track");
+    return;
+  }
+
+  // TODO(ekr@rtfm.com): For now assume that we have only one
+  // track type and it's destined for us
+  // See bug 784517
+  if (media.GetType() == MediaSegment::AUDIO) {
+    AudioSegment* audio = const_cast<AudioSegment *>(
+        static_cast<const AudioSegment *>(&media));
+
+    AudioSegment::ChunkIterator iter(*audio);
+    while(!iter.IsEnded()) {
+      TrackRate rate;
+#ifdef USE_FAKE_MEDIA_STREAMS
+      rate = Fake_MediaStream::GraphRate();
+#else
+      rate = graph->GraphRate();
+#endif
+      audio_processing_->QueueAudioChunk(rate, *iter, enabled_);
+      iter.Next();
+    }
+  } else {
+    // Ignore
+  }
+}
+
+// Video sink entry point: queues each chunk of the segment into the
+// listener's VideoFrameConverter. When the listener is disabled (identity /
+// permission gate), the converter is told to substitute black frames.
+void MediaPipelineTransmit::PipelineVideoSink::
+SetCurrentFrames(const VideoSegment& aSegment)
+{
+  MOZ_ASSERT(pipelineListener_);
+
+  if (!pipelineListener_->active_) {
+    MOZ_MTLOG(ML_DEBUG, "Discarding packets because transport not ready");
+    return;
+  }
+
+  if (conduit_->type() != MediaSessionConduit::VIDEO) {
+    // Ignore data of wrong kind in case we have a muxed stream
+    return;
+  }
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+  VideoSegment* video = const_cast<VideoSegment *>(&aSegment);
+
+  VideoSegment::ChunkIterator iter(*video);
+  while(!iter.IsEnded()) {
+    // Second arg requests black frames when the track is not enabled.
+    pipelineListener_->converter_->QueueVideoChunk(*iter, !pipelineListener_->enabled_);
+    iter.Next();
+  }
+#endif
+}
+
+// Refcounted callback interface invoked when a track is added; implemented
+// below by GenericReceiveCallback.
+class TrackAddedCallback {
+ public:
+  virtual void TrackAdded(TrackTicks current_ticks) = 0;
+
+  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(TrackAddedCallback);
+
+ protected:
+  virtual ~TrackAddedCallback() {}
+};
+
+class GenericReceiveListener;
+
+// TrackAddedCallback that holds a strong reference to a
+// GenericReceiveListener (TrackAdded is defined elsewhere in the file).
+class GenericReceiveCallback : public TrackAddedCallback
+{
+ public:
+  explicit GenericReceiveCallback(GenericReceiveListener* listener)
+    : listener_(listener) {}
+
+  void TrackAdded(TrackTicks time);
+
+ private:
+  RefPtr<GenericReceiveListener> listener_;
+};
+
+// Add a listener on the MSG thread using the MSG command queue
+// (in the external-linkage build, add directly — there is no graph queue).
+static void AddListener(MediaStream* source, MediaStreamListener* listener) {
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+  class Message : public ControlMessage {
+   public:
+    Message(MediaStream* stream, MediaStreamListener* listener)
+      : ControlMessage(stream),
+        listener_(listener) {}
+
+    virtual void Run() override {
+      mStream->AddListenerImpl(listener_.forget());
+    }
+   private:
+    RefPtr<MediaStreamListener> listener_;
+  };
+
+  MOZ_ASSERT(listener);
+
+  source->GraphImpl()->AppendMessage(MakeUnique<Message>(source, listener));
+#else
+  source->AddListener(listener);
+#endif
+}
+
+// Base listener for receive pipelines: holds the SourceMediaStream / TrackID
+// the pipeline renders into, tracks playout progress (played_ticks_), and
+// carries the principal handle used to tag produced media.
+class GenericReceiveListener : public MediaStreamListener
+{
+ public:
+  GenericReceiveListener(SourceMediaStream *source, TrackID track_id)
+    : source_(source),
+      track_id_(track_id),
+      played_ticks_(0),
+      principal_handle_(PRINCIPAL_HANDLE_NONE) {}
+
+  virtual ~GenericReceiveListener() {}
+
+  // Registers this listener on the source stream (via the MSG queue).
+  void AddSelf()
+  {
+    AddListener(source_, this);
+  }
+
+  void EndTrack()
+  {
+    source_->EndTrack(track_id_);
+  }
+
+#ifndef USE_FAKE_MEDIA_STREAMS
+  // Must be called on the main thread
+  void SetPrincipalHandle_m(const PrincipalHandle& principal_handle)
+  {
+    // Hop to the MSG thread so the update is ordered with media production.
+    class Message : public ControlMessage
+    {
+    public:
+      Message(GenericReceiveListener* listener,
+              MediaStream* stream,
+              const PrincipalHandle& principal_handle)
+        : ControlMessage(stream),
+          listener_(listener),
+          principal_handle_(principal_handle)
+      {}
+
+      void Run() override {
+        listener_->SetPrincipalHandle_msg(principal_handle_);
+      }
+
+      RefPtr<GenericReceiveListener> listener_;
+      PrincipalHandle principal_handle_;
+    };
+
+    source_->GraphImpl()->AppendMessage(MakeUnique<Message>(this, source_, principal_handle));
+  }
+
+  // Must be called on the MediaStreamGraph thread
+  void SetPrincipalHandle_msg(const PrincipalHandle& principal_handle)
+  {
+    principal_handle_ = principal_handle;
+  }
+#endif // USE_FAKE_MEDIA_STREAMS
+
+ protected:
+  SourceMediaStream *source_;
+  const TrackID track_id_;
+  TrackTicks played_ticks_;
+  PrincipalHandle principal_handle_;
+};
+
+MediaPipelineReceive::MediaPipelineReceive(
+ const std::string& pc,
+ nsCOMPtr<nsIEventTarget> main_thread,
+ nsCOMPtr<nsIEventTarget> sts_thread,
+ SourceMediaStream *stream,
+ const std::string& track_id,
+ int level,
+ RefPtr<MediaSessionConduit> conduit,
+ RefPtr<TransportFlow> rtp_transport,
+ RefPtr<TransportFlow> rtcp_transport,
+ nsAutoPtr<MediaPipelineFilter> filter) :
+ MediaPipeline(pc, RECEIVE, main_thread, sts_thread,
+ track_id, level, conduit, rtp_transport,
+ rtcp_transport, filter),
+ stream_(stream),
+ segments_added_(0)
+{
+ MOZ_ASSERT(stream_);
+}
+
+MediaPipelineReceive::~MediaPipelineReceive()
+{
+ MOZ_ASSERT(!stream_); // Check that we have shut down already.
+}
+
+class MediaPipelineReceiveAudio::PipelineListener
+ : public GenericReceiveListener
+{
+public:
+ PipelineListener(SourceMediaStream * source, TrackID track_id,
+ const RefPtr<MediaSessionConduit>& conduit)
+ : GenericReceiveListener(source, track_id),
+ conduit_(conduit)
+ {
+ }
+
+ ~PipelineListener()
+ {
+ if (!NS_IsMainThread()) {
+ // release conduit on mainthread. Must use forget()!
+ nsresult rv = NS_DispatchToMainThread(new
+ ConduitDeleteEvent(conduit_.forget()));
+ MOZ_ASSERT(!NS_FAILED(rv),"Could not dispatch conduit shutdown to main");
+ if (NS_FAILED(rv)) {
+ MOZ_CRASH();
+ }
+ } else {
+ conduit_ = nullptr;
+ }
+ }
+
+ // Implement MediaStreamListener
+ void NotifyPull(MediaStreamGraph* graph, StreamTime desired_time) override
+ {
+ MOZ_ASSERT(source_);
+ if (!source_) {
+ MOZ_MTLOG(ML_ERROR, "NotifyPull() called from a non-SourceMediaStream");
+ return;
+ }
+
+ // This comparison is done in total time to avoid accumulated roundoff errors.
+ while (source_->TicksToTimeRoundDown(WEBRTC_DEFAULT_SAMPLE_RATE,
+ played_ticks_) < desired_time) {
+ int16_t scratch_buffer[AUDIO_SAMPLE_BUFFER_MAX];
+
+ int samples_length;
+
+ // This fetches 10ms of data, either mono or stereo
+ MediaConduitErrorCode err =
+ static_cast<AudioSessionConduit*>(conduit_.get())->GetAudioFrame(
+ scratch_buffer,
+ WEBRTC_DEFAULT_SAMPLE_RATE,
+ 0, // TODO(ekr@rtfm.com): better estimate of "capture" (really playout) delay
+ samples_length);
+
+ if (err != kMediaConduitNoError) {
+ // Insert silence on conduit/GIPS failure (extremely unlikely)
+ MOZ_MTLOG(ML_ERROR, "Audio conduit failed (" << err
+ << ") to return data @ " << played_ticks_
+ << " (desired " << desired_time << " -> "
+ << source_->StreamTimeToSeconds(desired_time) << ")");
+ // if this is not enough we'll loop and provide more
+ samples_length = WEBRTC_DEFAULT_SAMPLE_RATE/100;
+ PodArrayZero(scratch_buffer);
+ }
+
+ MOZ_ASSERT(samples_length * sizeof(uint16_t) < AUDIO_SAMPLE_BUFFER_MAX);
+
+ MOZ_MTLOG(ML_DEBUG, "Audio conduit returned buffer of length "
+ << samples_length);
+
+ RefPtr<SharedBuffer> samples = SharedBuffer::Create(samples_length * sizeof(uint16_t));
+ int16_t *samples_data = static_cast<int16_t *>(samples->Data());
+ AudioSegment segment;
+ // We derive the number of channels of the stream from the number of samples
+ // the AudioConduit gives us, considering it gives us packets of 10ms and we
+ // know the rate.
+ uint32_t channelCount = samples_length / (WEBRTC_DEFAULT_SAMPLE_RATE / 100);
+ AutoTArray<int16_t*,2> channels;
+ AutoTArray<const int16_t*,2> outputChannels;
+ size_t frames = samples_length / channelCount;
+
+ channels.SetLength(channelCount);
+
+ size_t offset = 0;
+ for (size_t i = 0; i < channelCount; i++) {
+ channels[i] = samples_data + offset;
+ offset += frames;
+ }
+
+ DeinterleaveAndConvertBuffer(scratch_buffer,
+ frames,
+ channelCount,
+ channels.Elements());
+
+ outputChannels.AppendElements(channels);
+
+ segment.AppendFrames(samples.forget(), outputChannels, frames,
+ principal_handle_);
+
+ // Handle track not actually added yet or removed/finished
+ if (source_->AppendToTrack(track_id_, &segment)) {
+ played_ticks_ += frames;
+ } else {
+ MOZ_MTLOG(ML_ERROR, "AppendToTrack failed");
+ // we can't un-read the data, but that's ok since we don't want to
+ // buffer - but don't i-loop!
+ return;
+ }
+ }
+ }
+
+private:
+ RefPtr<MediaSessionConduit> conduit_;
+};
+
+MediaPipelineReceiveAudio::MediaPipelineReceiveAudio(
+ const std::string& pc,
+ nsCOMPtr<nsIEventTarget> main_thread,
+ nsCOMPtr<nsIEventTarget> sts_thread,
+ SourceMediaStream* stream,
+ const std::string& media_stream_track_id,
+ TrackID numeric_track_id,
+ int level,
+ RefPtr<AudioSessionConduit> conduit,
+ RefPtr<TransportFlow> rtp_transport,
+ RefPtr<TransportFlow> rtcp_transport,
+ nsAutoPtr<MediaPipelineFilter> filter) :
+ MediaPipelineReceive(pc, main_thread, sts_thread,
+ stream, media_stream_track_id, level, conduit,
+ rtp_transport, rtcp_transport, filter),
+ listener_(new PipelineListener(stream, numeric_track_id, conduit))
+{}
+
+void MediaPipelineReceiveAudio::DetachMedia()
+{
+ ASSERT_ON_THREAD(main_thread_);
+ if (stream_ && listener_) {
+ listener_->EndTrack();
+ stream_->RemoveListener(listener_);
+ stream_ = nullptr;
+ }
+}
+
+nsresult MediaPipelineReceiveAudio::Init() {
+ ASSERT_ON_THREAD(main_thread_);
+ MOZ_MTLOG(ML_DEBUG, __FUNCTION__);
+
+ description_ = pc_ + "| Receive audio[";
+ description_ += track_id_;
+ description_ += "]";
+
+ listener_->AddSelf();
+
+ return MediaPipelineReceive::Init();
+}
+
+#ifndef USE_FAKE_MEDIA_STREAMS
+void MediaPipelineReceiveAudio::SetPrincipalHandle_m(const PrincipalHandle& principal_handle)
+{
+ listener_->SetPrincipalHandle_m(principal_handle);
+}
+#endif // USE_FAKE_MEDIA_STREAMS
+
+class MediaPipelineReceiveVideo::PipelineListener
+ : public GenericReceiveListener {
+public:
+ PipelineListener(SourceMediaStream * source, TrackID track_id)
+ : GenericReceiveListener(source, track_id),
+ width_(0),
+ height_(0),
+#if defined(MOZILLA_INTERNAL_API)
+ image_container_(),
+ image_(),
+#endif
+ monitor_("Video PipelineListener")
+ {
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+ image_container_ =
+ LayerManager::CreateImageContainer(ImageContainer::ASYNCHRONOUS);
+#endif
+ }
+
+ // Implement MediaStreamListener
+ void NotifyPull(MediaStreamGraph* graph, StreamTime desired_time) override
+ {
+ #if defined(MOZILLA_INTERNAL_API)
+ ReentrantMonitorAutoEnter enter(monitor_);
+
+ RefPtr<Image> image = image_;
+ StreamTime delta = desired_time - played_ticks_;
+
+ // Don't append if we've already provided a frame that supposedly
+ // goes past the current aDesiredTime Doing so means a negative
+ // delta and thus messes up handling of the graph
+ if (delta > 0) {
+ VideoSegment segment;
+ segment.AppendFrame(image.forget(), delta, IntSize(width_, height_),
+ principal_handle_);
+ // Handle track not actually added yet or removed/finished
+ if (source_->AppendToTrack(track_id_, &segment)) {
+ played_ticks_ = desired_time;
+ } else {
+ MOZ_MTLOG(ML_ERROR, "AppendToTrack failed");
+ return;
+ }
+ }
+ #endif
+ }
+
+ // Accessors for external writes from the renderer
+ void FrameSizeChange(unsigned int width,
+ unsigned int height,
+ unsigned int number_of_streams) {
+ ReentrantMonitorAutoEnter enter(monitor_);
+
+ width_ = width;
+ height_ = height;
+ }
+
+ void RenderVideoFrame(const unsigned char* buffer,
+ size_t buffer_size,
+ uint32_t time_stamp,
+ int64_t render_time,
+ const RefPtr<layers::Image>& video_image)
+ {
+ RenderVideoFrame(buffer, buffer_size, width_, (width_ + 1) >> 1,
+ time_stamp, render_time, video_image);
+ }
+
+ void RenderVideoFrame(const unsigned char* buffer,
+ size_t buffer_size,
+ uint32_t y_stride,
+ uint32_t cbcr_stride,
+ uint32_t time_stamp,
+ int64_t render_time,
+ const RefPtr<layers::Image>& video_image)
+ {
+#ifdef MOZILLA_INTERNAL_API
+ ReentrantMonitorAutoEnter enter(monitor_);
+#endif // MOZILLA_INTERNAL_API
+
+#if defined(MOZILLA_INTERNAL_API)
+ if (buffer) {
+ // Create a video frame using |buffer|.
+#ifdef MOZ_WIDGET_GONK
+ RefPtr<PlanarYCbCrImage> yuvImage = new GrallocImage();
+#else
+ RefPtr<PlanarYCbCrImage> yuvImage = image_container_->CreatePlanarYCbCrImage();
+#endif
+ uint8_t* frame = const_cast<uint8_t*>(static_cast<const uint8_t*> (buffer));
+
+ PlanarYCbCrData yuvData;
+ yuvData.mYChannel = frame;
+ yuvData.mYSize = IntSize(y_stride, height_);
+ yuvData.mYStride = y_stride;
+ yuvData.mCbCrStride = cbcr_stride;
+ yuvData.mCbChannel = frame + height_ * yuvData.mYStride;
+ yuvData.mCrChannel = yuvData.mCbChannel + ((height_ + 1) >> 1) * yuvData.mCbCrStride;
+ yuvData.mCbCrSize = IntSize(yuvData.mCbCrStride, (height_ + 1) >> 1);
+ yuvData.mPicX = 0;
+ yuvData.mPicY = 0;
+ yuvData.mPicSize = IntSize(width_, height_);
+ yuvData.mStereoMode = StereoMode::MONO;
+
+ if (!yuvImage->CopyData(yuvData)) {
+ MOZ_ASSERT(false);
+ return;
+ }
+
+ image_ = yuvImage;
+ }
+#ifdef WEBRTC_GONK
+ else {
+ // Decoder produced video frame that can be appended to the track directly.
+ MOZ_ASSERT(video_image);
+ image_ = video_image;
+ }
+#endif // WEBRTC_GONK
+#endif // MOZILLA_INTERNAL_API
+ }
+
+private:
+ int width_;
+ int height_;
+#if defined(MOZILLA_INTERNAL_API)
+ RefPtr<layers::ImageContainer> image_container_;
+ RefPtr<layers::Image> image_;
+#endif
+ mozilla::ReentrantMonitor monitor_; // Monitor for processing WebRTC frames.
+ // Protects image_ against:
+ // - Writing from the GIPS thread
+ // - Reading from the MSG thread
+};
+
+class MediaPipelineReceiveVideo::PipelineRenderer : public VideoRenderer
+{
+public:
+ explicit PipelineRenderer(MediaPipelineReceiveVideo *pipeline) :
+ pipeline_(pipeline) {}
+
+ void Detach() { pipeline_ = nullptr; }
+
+ // Implement VideoRenderer
+ void FrameSizeChange(unsigned int width,
+ unsigned int height,
+ unsigned int number_of_streams) override
+ {
+ pipeline_->listener_->FrameSizeChange(width, height, number_of_streams);
+ }
+
+ void RenderVideoFrame(const unsigned char* buffer,
+ size_t buffer_size,
+ uint32_t time_stamp,
+ int64_t render_time,
+ const ImageHandle& handle) override
+ {
+ pipeline_->listener_->RenderVideoFrame(buffer, buffer_size,
+ time_stamp, render_time,
+ handle.GetImage());
+ }
+
+ void RenderVideoFrame(const unsigned char* buffer,
+ size_t buffer_size,
+ uint32_t y_stride,
+ uint32_t cbcr_stride,
+ uint32_t time_stamp,
+ int64_t render_time,
+ const ImageHandle& handle) override
+ {
+ pipeline_->listener_->RenderVideoFrame(buffer, buffer_size,
+ y_stride, cbcr_stride,
+ time_stamp, render_time,
+ handle.GetImage());
+ }
+
+private:
+ MediaPipelineReceiveVideo *pipeline_; // Raw pointer to avoid cycles
+};
+
+
+MediaPipelineReceiveVideo::MediaPipelineReceiveVideo(
+ const std::string& pc,
+ nsCOMPtr<nsIEventTarget> main_thread,
+ nsCOMPtr<nsIEventTarget> sts_thread,
+ SourceMediaStream *stream,
+ const std::string& media_stream_track_id,
+ TrackID numeric_track_id,
+ int level,
+ RefPtr<VideoSessionConduit> conduit,
+ RefPtr<TransportFlow> rtp_transport,
+ RefPtr<TransportFlow> rtcp_transport,
+ nsAutoPtr<MediaPipelineFilter> filter) :
+ MediaPipelineReceive(pc, main_thread, sts_thread,
+ stream, media_stream_track_id, level, conduit,
+ rtp_transport, rtcp_transport, filter),
+ renderer_(new PipelineRenderer(this)),
+ listener_(new PipelineListener(stream, numeric_track_id))
+{}
+
+void MediaPipelineReceiveVideo::DetachMedia()
+{
+ ASSERT_ON_THREAD(main_thread_);
+
+ // stop generating video and thus stop invoking the PipelineRenderer
+ // and PipelineListener - the renderer has a raw ptr to the Pipeline to
+ // avoid cycles, and the render callbacks are invoked from a different
+ // thread so simple null-checks would cause TSAN bugs without locks.
+ static_cast<VideoSessionConduit*>(conduit_.get())->DetachRenderer();
+ if (stream_ && listener_) {
+ listener_->EndTrack();
+ stream_->RemoveListener(listener_);
+ stream_ = nullptr;
+ }
+}
+
+nsresult MediaPipelineReceiveVideo::Init() {
+ ASSERT_ON_THREAD(main_thread_);
+ MOZ_MTLOG(ML_DEBUG, __FUNCTION__);
+
+ description_ = pc_ + "| Receive video[";
+ description_ += track_id_;
+ description_ += "]";
+
+#if defined(MOZILLA_INTERNAL_API)
+ listener_->AddSelf();
+#endif
+
+ // Always happens before we can DetachMedia()
+ static_cast<VideoSessionConduit *>(conduit_.get())->
+ AttachRenderer(renderer_);
+
+ return MediaPipelineReceive::Init();
+}
+
+#ifndef USE_FAKE_MEDIA_STREAMS
+void MediaPipelineReceiveVideo::SetPrincipalHandle_m(const PrincipalHandle& principal_handle)
+{
+ listener_->SetPrincipalHandle_m(principal_handle);
+}
+#endif // USE_FAKE_MEDIA_STREAMS
+
+} // end namespace
diff --git a/media/webrtc/signaling/src/mediapipeline/MediaPipeline.h b/media/webrtc/signaling/src/mediapipeline/MediaPipeline.h
new file mode 100644
index 000000000..d609cbd47
--- /dev/null
+++ b/media/webrtc/signaling/src/mediapipeline/MediaPipeline.h
@@ -0,0 +1,479 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Original author: ekr@rtfm.com
+
+#ifndef mediapipeline_h__
+#define mediapipeline_h__
+
+#include "sigslot.h"
+
+#ifdef USE_FAKE_MEDIA_STREAMS
+#include "FakeMediaStreams.h"
+#endif
+#include "MediaConduitInterface.h"
+#include "mozilla/ReentrantMonitor.h"
+#include "mozilla/Atomics.h"
+#include "SrtpFlow.h"
+#include "databuffer.h"
+#include "runnable_utils.h"
+#include "transportflow.h"
+#include "AudioPacketizer.h"
+#include "StreamTracks.h"
+
+#include "webrtc/modules/rtp_rtcp/interface/rtp_header_parser.h"
+
+// Should come from MediaEngine.h, but that's a pain to include here
+// because of the MOZILLA_EXTERNAL_LINKAGE stuff.
+#define WEBRTC_DEFAULT_SAMPLE_RATE 32000
+
+class nsIPrincipal;
+
+namespace mozilla {
+class MediaPipelineFilter;
+class PeerIdentity;
+class AudioProxyThread;
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+class VideoFrameConverter;
+#endif
+
+#ifndef USE_FAKE_MEDIA_STREAMS
+namespace dom {
+ class MediaStreamTrack;
+} // namespace dom
+
+class SourceMediaStream;
+#endif // USE_FAKE_MEDIA_STREAMS
+
+// A class that represents the pipeline of audio and video
+// The dataflow looks like:
+//
+// TRANSMIT
+// CaptureDevice -> stream -> [us] -> conduit -> [us] -> transport -> network
+//
+// RECEIVE
+// network -> transport -> [us] -> conduit -> [us] -> stream -> Playout
+//
+// The boxes labeled [us] are just bridge logic implemented in this class
+//
+// We have to deal with a number of threads:
+//
+// GSM:
+// * Assembles the pipeline
+// SocketTransportService
+// * Receives notification that ICE and DTLS have completed
+// * Processes incoming network data and passes it to the conduit
+// * Processes outgoing RTP and RTCP
+// MediaStreamGraph
+// * Receives outgoing data from the MediaStreamGraph
+// * Receives pull requests for more data from the
+// MediaStreamGraph
+// One or another GIPS threads
+// * Receives RTCP messages to send to the other side
+// * Processes video frames GIPS wants to render
+//
+// For a transmitting conduit, "output" is RTP and "input" is RTCP.
+// For a receiving conduit, "input" is RTP and "output" is RTCP.
+//
+
+class MediaPipeline : public sigslot::has_slots<> {
+ public:
+ enum Direction { TRANSMIT, RECEIVE };
+ enum State { MP_CONNECTING, MP_OPEN, MP_CLOSED };
+ MediaPipeline(const std::string& pc,
+ Direction direction,
+ nsCOMPtr<nsIEventTarget> main_thread,
+ nsCOMPtr<nsIEventTarget> sts_thread,
+ const std::string& track_id,
+ int level,
+ RefPtr<MediaSessionConduit> conduit,
+ RefPtr<TransportFlow> rtp_transport,
+ RefPtr<TransportFlow> rtcp_transport,
+ nsAutoPtr<MediaPipelineFilter> filter);
+
+ // Must be called on the STS thread. Must be called after ShutdownMedia_m().
+ void DetachTransport_s();
+
+ // Must be called on the main thread.
+ void ShutdownMedia_m()
+ {
+ ASSERT_ON_THREAD(main_thread_);
+
+ if (direction_ == RECEIVE) {
+ conduit_->StopReceiving();
+ } else {
+ conduit_->StopTransmitting();
+ }
+ DetachMedia();
+ }
+
+ virtual nsresult Init();
+
+ void UpdateTransport_m(int level,
+ RefPtr<TransportFlow> rtp_transport,
+ RefPtr<TransportFlow> rtcp_transport,
+ nsAutoPtr<MediaPipelineFilter> filter);
+
+ void UpdateTransport_s(int level,
+ RefPtr<TransportFlow> rtp_transport,
+ RefPtr<TransportFlow> rtcp_transport,
+ nsAutoPtr<MediaPipelineFilter> filter);
+
+ // Used only for testing; installs a MediaPipelineFilter that filters
+ // everything but the nth ssrc
+ void SelectSsrc_m(size_t ssrc_index);
+ void SelectSsrc_s(size_t ssrc_index);
+
+ virtual Direction direction() const { return direction_; }
+ virtual const std::string& trackid() const { return track_id_; }
+ virtual int level() const { return level_; }
+ virtual bool IsVideo() const = 0;
+
+ bool IsDoingRtcpMux() const {
+ return (rtp_.type_ == MUX);
+ }
+
+ int32_t rtp_packets_sent() const { return rtp_packets_sent_; }
+ int64_t rtp_bytes_sent() const { return rtp_bytes_sent_; }
+ int32_t rtcp_packets_sent() const { return rtcp_packets_sent_; }
+ int32_t rtp_packets_received() const { return rtp_packets_received_; }
+ int64_t rtp_bytes_received() const { return rtp_bytes_received_; }
+ int32_t rtcp_packets_received() const { return rtcp_packets_received_; }
+
+ MediaSessionConduit *Conduit() const { return conduit_; }
+
+ // Thread counting
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaPipeline)
+
+ typedef enum {
+ RTP,
+ RTCP,
+ MUX,
+ MAX_RTP_TYPE
+ } RtpType;
+
+ protected:
+ virtual ~MediaPipeline();
+ virtual void DetachMedia() {}
+ nsresult AttachTransport_s();
+
+ // Separate class to allow ref counting
+ class PipelineTransport : public TransportInterface {
+ public:
+ // Implement the TransportInterface functions
+ explicit PipelineTransport(MediaPipeline *pipeline)
+ : pipeline_(pipeline),
+ sts_thread_(pipeline->sts_thread_) {}
+
+ void Attach(MediaPipeline *pipeline) { pipeline_ = pipeline; }
+ void Detach() { pipeline_ = nullptr; }
+ MediaPipeline *pipeline() const { return pipeline_; }
+
+ virtual nsresult SendRtpPacket(const void* data, int len);
+ virtual nsresult SendRtcpPacket(const void* data, int len);
+
+ private:
+ nsresult SendRtpRtcpPacket_s(nsAutoPtr<DataBuffer> data,
+ bool is_rtp);
+
+ MediaPipeline *pipeline_; // Raw pointer to avoid cycles
+ nsCOMPtr<nsIEventTarget> sts_thread_;
+ };
+ friend class PipelineTransport;
+
+ class TransportInfo {
+ public:
+ TransportInfo(RefPtr<TransportFlow> flow, RtpType type) :
+ transport_(flow),
+ state_(MP_CONNECTING),
+ type_(type) {
+ MOZ_ASSERT(flow);
+ }
+
+ void Detach()
+ {
+ transport_ = nullptr;
+ send_srtp_ = nullptr;
+ recv_srtp_ = nullptr;
+ }
+
+ RefPtr<TransportFlow> transport_;
+ State state_;
+ RefPtr<SrtpFlow> send_srtp_;
+ RefPtr<SrtpFlow> recv_srtp_;
+ RtpType type_;
+ };
+
+ // The transport is down
+ virtual nsresult TransportFailed_s(TransportInfo &info);
+ // The transport is ready
+ virtual nsresult TransportReady_s(TransportInfo &info);
+ void UpdateRtcpMuxState(TransportInfo &info);
+
+ // Unhooks from signals
+ void DisconnectTransport_s(TransportInfo &info);
+ nsresult ConnectTransport_s(TransportInfo &info);
+
+ TransportInfo* GetTransportInfo_s(TransportFlow *flow);
+
+ void increment_rtp_packets_sent(int bytes);
+ void increment_rtcp_packets_sent();
+ void increment_rtp_packets_received(int bytes);
+ void increment_rtcp_packets_received();
+
+ virtual nsresult SendPacket(TransportFlow *flow, const void *data, int len);
+
+ // Process slots on transports
+ void StateChange(TransportFlow *flow, TransportLayer::State);
+ void RtpPacketReceived(TransportLayer *layer, const unsigned char *data,
+ size_t len);
+ void RtcpPacketReceived(TransportLayer *layer, const unsigned char *data,
+ size_t len);
+ void PacketReceived(TransportLayer *layer, const unsigned char *data,
+ size_t len);
+
+ Direction direction_;
+ std::string track_id_; // The track on the stream.
+ // Written on the main thread.
+ // Used on STS and MediaStreamGraph threads.
+ // Not used outside initialization in MediaPipelineTransmit
+ // The m-line index (starting at 0, to match convention) Atomic because
+ // this value is updated from STS, but read on main, and we don't want to
+ // bother with dispatches just to get an int occasionally.
+ Atomic<int> level_;
+ RefPtr<MediaSessionConduit> conduit_; // Our conduit. Written on the main
+ // thread. Read on STS thread.
+
+ // The transport objects. Read/written on STS thread.
+ TransportInfo rtp_;
+ TransportInfo rtcp_;
+
+ // Pointers to the threads we need. Initialized at creation
+ // and used all over the place.
+ nsCOMPtr<nsIEventTarget> main_thread_;
+ nsCOMPtr<nsIEventTarget> sts_thread_;
+
+ // Created on Init. Referenced by the conduit and eventually
+ // destroyed on the STS thread.
+ RefPtr<PipelineTransport> transport_;
+
+ // Only safe to access from STS thread.
+ // Build into TransportInfo?
+ int32_t rtp_packets_sent_;
+ int32_t rtcp_packets_sent_;
+ int32_t rtp_packets_received_;
+ int32_t rtcp_packets_received_;
+ int64_t rtp_bytes_sent_;
+ int64_t rtp_bytes_received_;
+
+ std::vector<uint32_t> ssrcs_received_;
+
+ // Written on Init. Read on STS thread.
+ std::string pc_;
+ std::string description_;
+
+ // Written on Init, all following accesses are on the STS thread.
+ nsAutoPtr<MediaPipelineFilter> filter_;
+ nsAutoPtr<webrtc::RtpHeaderParser> rtp_parser_;
+
+ private:
+ nsresult Init_s();
+
+ bool IsRtp(const unsigned char *data, size_t len);
+};
+
+class ConduitDeleteEvent: public Runnable
+{
+public:
+ explicit ConduitDeleteEvent(already_AddRefed<MediaSessionConduit> aConduit) :
+ mConduit(aConduit) {}
+
+ /* we exist solely to proxy release of the conduit */
+ NS_IMETHOD Run() override { return NS_OK; }
+private:
+ RefPtr<MediaSessionConduit> mConduit;
+};
+
+// A specialization of pipeline for reading from an input device
+// and transmitting to the network.
+class MediaPipelineTransmit : public MediaPipeline {
+public:
+ // Set rtcp_transport to nullptr to use rtcp-mux
+ MediaPipelineTransmit(const std::string& pc,
+ nsCOMPtr<nsIEventTarget> main_thread,
+ nsCOMPtr<nsIEventTarget> sts_thread,
+ dom::MediaStreamTrack* domtrack,
+ const std::string& track_id,
+ int level,
+ RefPtr<MediaSessionConduit> conduit,
+ RefPtr<TransportFlow> rtp_transport,
+ RefPtr<TransportFlow> rtcp_transport,
+ nsAutoPtr<MediaPipelineFilter> filter);
+
+ // Initialize (stuff here may fail)
+ nsresult Init() override;
+
+ virtual void AttachToTrack(const std::string& track_id);
+
+ // written and used from MainThread
+ bool IsVideo() const override;
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+ // When the principal of the domtrack changes, it calls through to here
+ // so that we can determine whether to enable track transmission.
+ // `track` has to be null or equal `domtrack_` for us to apply the update.
+ virtual void UpdateSinkIdentity_m(dom::MediaStreamTrack* track,
+ nsIPrincipal* principal,
+ const PeerIdentity* sinkIdentity);
+#endif
+
+ // Called on the main thread.
+ void DetachMedia() override;
+
+ // Override MediaPipeline::TransportReady.
+ nsresult TransportReady_s(TransportInfo &info) override;
+
+ // Replace a track with a different one
+ // In non-compliance with the likely final spec, allow the new
+ // track to be part of a different stream (since we don't support
+ // multiple tracks of a type in a stream yet). bug 1056650
+ virtual nsresult ReplaceTrack(dom::MediaStreamTrack& domtrack);
+
+ // Separate classes to allow ref counting
+ class PipelineListener;
+ class VideoFrameFeeder;
+ class PipelineVideoSink;
+
+ protected:
+ ~MediaPipelineTransmit();
+
+ private:
+ RefPtr<PipelineListener> listener_;
+ RefPtr<AudioProxyThread> audio_processing_;
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+ RefPtr<VideoFrameFeeder> feeder_;
+ RefPtr<VideoFrameConverter> converter_;
+#endif
+ RefPtr<PipelineVideoSink> video_sink_;
+ dom::MediaStreamTrack* domtrack_;
+};
+
+
+// A specialization of pipeline for reading from the network and
+// rendering video.
+class MediaPipelineReceive : public MediaPipeline {
+ public:
+ // Set rtcp_transport to nullptr to use rtcp-mux
+ MediaPipelineReceive(const std::string& pc,
+ nsCOMPtr<nsIEventTarget> main_thread,
+ nsCOMPtr<nsIEventTarget> sts_thread,
+ SourceMediaStream *stream,
+ const std::string& track_id,
+ int level,
+ RefPtr<MediaSessionConduit> conduit,
+ RefPtr<TransportFlow> rtp_transport,
+ RefPtr<TransportFlow> rtcp_transport,
+ nsAutoPtr<MediaPipelineFilter> filter);
+
+ int segments_added() const { return segments_added_; }
+
+#ifndef USE_FAKE_MEDIA_STREAMS
+ // Sets the PrincipalHandle we set on the media chunks produced by this
+ // pipeline. Must be called on the main thread.
+ virtual void SetPrincipalHandle_m(const PrincipalHandle& principal_handle) = 0;
+#endif // USE_FAKE_MEDIA_STREAMS
+ protected:
+ ~MediaPipelineReceive();
+
+ RefPtr<SourceMediaStream> stream_;
+ int segments_added_;
+
+ private:
+};
+
+
+// A specialization of pipeline for reading from the network and
+// rendering audio.
+class MediaPipelineReceiveAudio : public MediaPipelineReceive {
+ public:
+ MediaPipelineReceiveAudio(const std::string& pc,
+ nsCOMPtr<nsIEventTarget> main_thread,
+ nsCOMPtr<nsIEventTarget> sts_thread,
+ SourceMediaStream* stream,
+ // This comes from an msid attribute. Everywhere
+ // but MediaStreamGraph uses this.
+ const std::string& media_stream_track_id,
+ // This is an integer identifier that is only
+ // unique within a single DOMMediaStream, which is
+ // used by MediaStreamGraph
+ TrackID numeric_track_id,
+ int level,
+ RefPtr<AudioSessionConduit> conduit,
+ RefPtr<TransportFlow> rtp_transport,
+ RefPtr<TransportFlow> rtcp_transport,
+ nsAutoPtr<MediaPipelineFilter> filter);
+
+ void DetachMedia() override;
+
+ nsresult Init() override;
+ bool IsVideo() const override { return false; }
+
+#ifndef USE_FAKE_MEDIA_STREAMS
+ void SetPrincipalHandle_m(const PrincipalHandle& principal_handle) override;
+#endif // USE_FAKE_MEDIA_STREAMS
+
+ private:
+ // Separate class to allow ref counting
+ class PipelineListener;
+
+ RefPtr<PipelineListener> listener_;
+};
+
+
+// A specialization of pipeline for reading from the network and
+// rendering video.
+class MediaPipelineReceiveVideo : public MediaPipelineReceive {
+ public:
+ MediaPipelineReceiveVideo(const std::string& pc,
+ nsCOMPtr<nsIEventTarget> main_thread,
+ nsCOMPtr<nsIEventTarget> sts_thread,
+ SourceMediaStream *stream,
+ // This comes from an msid attribute. Everywhere
+ // but MediaStreamGraph uses this.
+ const std::string& media_stream_track_id,
+ // This is an integer identifier that is only
+ // unique within a single DOMMediaStream, which is
+ // used by MediaStreamGraph
+ TrackID numeric_track_id,
+ int level,
+ RefPtr<VideoSessionConduit> conduit,
+ RefPtr<TransportFlow> rtp_transport,
+ RefPtr<TransportFlow> rtcp_transport,
+ nsAutoPtr<MediaPipelineFilter> filter);
+
+ // Called on the main thread.
+ void DetachMedia() override;
+
+ nsresult Init() override;
+ bool IsVideo() const override { return true; }
+
+#ifndef USE_FAKE_MEDIA_STREAMS
+ void SetPrincipalHandle_m(const PrincipalHandle& principal_handle) override;
+#endif // USE_FAKE_MEDIA_STREAMS
+
+ private:
+ class PipelineRenderer;
+ friend class PipelineRenderer;
+
+ // Separate class to allow ref counting
+ class PipelineListener;
+
+ RefPtr<PipelineRenderer> renderer_;
+ RefPtr<PipelineListener> listener_;
+};
+
+
+} // namespace mozilla
+#endif
diff --git a/media/webrtc/signaling/src/mediapipeline/MediaPipelineFilter.cpp b/media/webrtc/signaling/src/mediapipeline/MediaPipelineFilter.cpp
new file mode 100644
index 000000000..b56c272f9
--- /dev/null
+++ b/media/webrtc/signaling/src/mediapipeline/MediaPipelineFilter.cpp
@@ -0,0 +1,97 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: softtabstop=2:shiftwidth=2:expandtab
+ * */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Original author: bcampen@mozilla.com
+
+#include "MediaPipelineFilter.h"
+
+#include "webrtc/modules/interface/module_common_types.h"
+
+namespace mozilla {
+
+MediaPipelineFilter::MediaPipelineFilter() : correlator_(0) {
+}
+
+bool MediaPipelineFilter::Filter(const webrtc::RTPHeader& header,
+ uint32_t correlator) {
+ if (correlator) {
+ // This special correlator header takes precedence. It also lets us learn
+ // about SSRC mappings if we don't know about them yet.
+ if (correlator == correlator_) {
+ AddRemoteSSRC(header.ssrc);
+ return true;
+ } else {
+ // Some other stream; it is possible that an SSRC has moved, so make sure
+ // we don't have that SSRC in our filter any more.
+ remote_ssrc_set_.erase(header.ssrc);
+ return false;
+ }
+ }
+
+ if (remote_ssrc_set_.count(header.ssrc)) {
+ return true;
+ }
+
+ // Last ditch effort...
+ if (payload_type_set_.count(header.payloadType)) {
+ // Actual match. We need to update the ssrc map so we can route rtcp
+ // sender reports correctly (these use a different payload-type field)
+ AddRemoteSSRC(header.ssrc);
+ return true;
+ }
+
+ return false;
+}
+
+void MediaPipelineFilter::AddRemoteSSRC(uint32_t ssrc) {
+ remote_ssrc_set_.insert(ssrc);
+}
+
+void MediaPipelineFilter::AddUniquePT(uint8_t payload_type) {
+ payload_type_set_.insert(payload_type);
+}
+
+void MediaPipelineFilter::SetCorrelator(uint32_t correlator) {
+ correlator_ = correlator;
+}
+
+void MediaPipelineFilter::Update(const MediaPipelineFilter& filter_update) {
+ // We will not stomp the remote_ssrc_set_ if the update has no ssrcs,
+ // because we don't want to unlearn any remote ssrcs unless the other end
+ // has explicitly given us a new set.
+ if (!filter_update.remote_ssrc_set_.empty()) {
+ remote_ssrc_set_ = filter_update.remote_ssrc_set_;
+ }
+
+ payload_type_set_ = filter_update.payload_type_set_;
+ correlator_ = filter_update.correlator_;
+}
+
+bool
+MediaPipelineFilter::FilterSenderReport(const unsigned char* data,
+ size_t len) const {
+ if (len < FIRST_SSRC_OFFSET + 4) {
+ return false;
+ }
+
+ uint8_t payload_type = data[PT_OFFSET];
+
+ if (payload_type != SENDER_REPORT_T) {
+ return false;
+ }
+
+ uint32_t ssrc = 0;
+ ssrc += (uint32_t)data[FIRST_SSRC_OFFSET] << 24;
+ ssrc += (uint32_t)data[FIRST_SSRC_OFFSET + 1] << 16;
+ ssrc += (uint32_t)data[FIRST_SSRC_OFFSET + 2] << 8;
+ ssrc += (uint32_t)data[FIRST_SSRC_OFFSET + 3];
+
+ return !!remote_ssrc_set_.count(ssrc);
+}
+
+} // end namespace mozilla
+
diff --git a/media/webrtc/signaling/src/mediapipeline/MediaPipelineFilter.h b/media/webrtc/signaling/src/mediapipeline/MediaPipelineFilter.h
new file mode 100644
index 000000000..31de8ccb2
--- /dev/null
+++ b/media/webrtc/signaling/src/mediapipeline/MediaPipelineFilter.h
@@ -0,0 +1,86 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: softtabstop=2:shiftwidth=2:expandtab
+ * */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Original author: bcampen@mozilla.com
+
+#ifndef mediapipelinefilter_h__
+#define mediapipelinefilter_h__
+
+#include <cstddef>
+#include <stdint.h>
+
+#include <set>
+
+namespace webrtc {
+struct RTPHeader;
+}
+
+namespace mozilla {
+
+// A class that handles the work of filtering RTP packets that arrive at a
+// MediaPipeline. This is primarily important for the use of BUNDLE (i.e.,
+// multiple m-lines share the same RTP stream). There are three ways that this
+// can work:
+//
+// 1) In our SDP, we include a media-level extmap parameter with a unique
+// integer of our choosing, with the hope that the other side will include
+// this value in a header in the first few RTP packets it sends us. This
+// allows us to perform correlation in cases where the other side has not
+// informed us of the ssrcs it will be sending (either because it did not
+// include them in its SDP, or their SDP has not arrived yet)
+// and also gives us the opportunity to learn SSRCs from packets so adorned.
+//
+// 2) If the remote endpoint includes SSRC media-level attributes in its SDP,
+// we can simply use this information to populate the filter. The only
+// shortcoming here is when RTP packets arrive before the answer does. See
+// above.
+//
+// 3) As a fallback, we can try to use payload type IDs to perform correlation,
+// but only when the type id is unique to this media section.
+// This too allows us to learn about SSRCs (mostly useful for filtering
+// sender reports later).
+class MediaPipelineFilter {
+ public:
+ MediaPipelineFilter();
+
+ // Checks whether this packet passes the filter, possibly updating the filter
+ // in the process (if the correlator or payload types are used, they can teach
+ // the filter about ssrcs)
+ bool Filter(const webrtc::RTPHeader& header, uint32_t correlator = 0);
+
+ // RTCP doesn't have things like the RTP correlator, and uses its own
+ // payload types too.
+ bool FilterSenderReport(const unsigned char* data, size_t len) const;
+
+ void AddRemoteSSRC(uint32_t ssrc);
+
+ // When a payload type id is unique to our media section, add it here.
+ void AddUniquePT(uint8_t payload_type);
+ void SetCorrelator(uint32_t correlator);
+
+ void Update(const MediaPipelineFilter& filter_update);
+
+ // Some payload types
+ static const uint8_t SENDER_REPORT_T = 200;
+
+ private:
+ // Payload type is always in the second byte
+ static const size_t PT_OFFSET = 1;
+ // First SSRC always starts at the fifth byte.
+ static const size_t FIRST_SSRC_OFFSET = 4;
+
+ uint32_t correlator_;
+ // The number of filters we manage here is quite small, so I am optimizing
+ // for readability.
+ std::set<uint32_t> remote_ssrc_set_;
+ std::set<uint8_t> payload_type_set_;
+};
+
+} // end namespace mozilla
+
+#endif // mediapipelinefilter_h__
+
diff --git a/media/webrtc/signaling/src/mediapipeline/SrtpFlow.cpp b/media/webrtc/signaling/src/mediapipeline/SrtpFlow.cpp
new file mode 100644
index 000000000..59c3dac0a
--- /dev/null
+++ b/media/webrtc/signaling/src/mediapipeline/SrtpFlow.cpp
@@ -0,0 +1,251 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Original author: ekr@rtfm.com
+
+#include "logging.h"
+#include "SrtpFlow.h"
+
+#include "srtp.h"
+#include "ssl.h"
+#include "sslproto.h"
+
+#include "mozilla/RefPtr.h"
+
+// Logging context
+using namespace mozilla;
+MOZ_MTLOG_MODULE("mediapipeline")
+
+namespace mozilla {
+
+bool SrtpFlow::initialized; // Static
+
+SrtpFlow::~SrtpFlow() {
+ if (session_) {
+ srtp_dealloc(session_);
+ }
+}
+
+RefPtr<SrtpFlow> SrtpFlow::Create(int cipher_suite,
+ bool inbound,
+ const void *key,
+ size_t key_len) {
+ nsresult res = Init();
+ if (!NS_SUCCEEDED(res))
+ return nullptr;
+
+ RefPtr<SrtpFlow> flow = new SrtpFlow();
+
+ if (!key) {
+ MOZ_MTLOG(ML_ERROR, "Null SRTP key specified");
+ return nullptr;
+ }
+
+ if (key_len != SRTP_TOTAL_KEY_LENGTH) {
+ MOZ_MTLOG(ML_ERROR, "Invalid SRTP key length");
+ return nullptr;
+ }
+
+ srtp_policy_t policy;
+ memset(&policy, 0, sizeof(srtp_policy_t));
+
+ // Note that we set the same cipher suite for RTP and RTCP
+ // since any flow can only have one cipher suite with DTLS-SRTP
+ switch (cipher_suite) {
+ case SRTP_AES128_CM_HMAC_SHA1_80:
+ MOZ_MTLOG(ML_DEBUG,
+ "Setting SRTP cipher suite SRTP_AES128_CM_HMAC_SHA1_80");
+ crypto_policy_set_aes_cm_128_hmac_sha1_80(&policy.rtp);
+ crypto_policy_set_aes_cm_128_hmac_sha1_80(&policy.rtcp);
+ break;
+ case SRTP_AES128_CM_HMAC_SHA1_32:
+ MOZ_MTLOG(ML_DEBUG,
+ "Setting SRTP cipher suite SRTP_AES128_CM_HMAC_SHA1_32");
+ crypto_policy_set_aes_cm_128_hmac_sha1_32(&policy.rtp);
+      crypto_policy_set_aes_cm_128_hmac_sha1_80(&policy.rtcp); // 80-bit per
+      break;                                      // RFC 5764 S 4.1.2.
+ default:
+ MOZ_MTLOG(ML_ERROR, "Request to set unknown SRTP cipher suite");
+ return nullptr;
+ }
+ // This key is copied into the srtp_t object, so we don't
+ // need to keep it.
+ policy.key = const_cast<unsigned char *>(
+ static_cast<const unsigned char *>(key));
+ policy.ssrc.type = inbound ? ssrc_any_inbound : ssrc_any_outbound;
+ policy.ssrc.value = 0;
+ policy.ekt = nullptr;
+ policy.window_size = 1024; // Use the Chrome value. Needs to be revisited. Default is 128
+ policy.allow_repeat_tx = 1; // Use Chrome value; needed for NACK mode to work
+ policy.next = nullptr;
+
+ // Now make the session
+ err_status_t r = srtp_create(&flow->session_, &policy);
+ if (r != err_status_ok) {
+ MOZ_MTLOG(ML_ERROR, "Error creating srtp session");
+ return nullptr;
+ }
+
+ return flow;
+}
+
+
+nsresult SrtpFlow::CheckInputs(bool protect, void *in, int in_len,
+ int max_len, int *out_len) {
+ MOZ_ASSERT(in);
+ if (!in) {
+ MOZ_MTLOG(ML_ERROR, "NULL input value");
+ return NS_ERROR_NULL_POINTER;
+ }
+
+ if (in_len < 0) {
+ MOZ_MTLOG(ML_ERROR, "Input length is negative");
+ return NS_ERROR_ILLEGAL_VALUE;
+ }
+
+ if (max_len < 0) {
+ MOZ_MTLOG(ML_ERROR, "Max output length is negative");
+ return NS_ERROR_ILLEGAL_VALUE;
+ }
+
+ if (protect) {
+ if ((max_len < SRTP_MAX_EXPANSION) ||
+ ((max_len - SRTP_MAX_EXPANSION) < in_len)) {
+ MOZ_MTLOG(ML_ERROR, "Output too short");
+ return NS_ERROR_ILLEGAL_VALUE;
+ }
+ }
+ else {
+ if (in_len > max_len) {
+ MOZ_MTLOG(ML_ERROR, "Output too short");
+ return NS_ERROR_ILLEGAL_VALUE;
+ }
+ }
+
+ return NS_OK;
+}
+
+nsresult SrtpFlow::ProtectRtp(void *in, int in_len,
+ int max_len, int *out_len) {
+ nsresult res = CheckInputs(true, in, in_len, max_len, out_len);
+ if (NS_FAILED(res))
+ return res;
+
+ int len = in_len;
+ err_status_t r = srtp_protect(session_, in, &len);
+
+ if (r != err_status_ok) {
+ MOZ_MTLOG(ML_ERROR, "Error protecting SRTP packet");
+ return NS_ERROR_FAILURE;
+ }
+
+ MOZ_ASSERT(len <= max_len);
+ *out_len = len;
+
+
+ MOZ_MTLOG(ML_DEBUG, "Successfully protected an SRTP packet of len "
+ << *out_len);
+
+ return NS_OK;
+}
+
+nsresult SrtpFlow::UnprotectRtp(void *in, int in_len,
+ int max_len, int *out_len) {
+ nsresult res = CheckInputs(false, in, in_len, max_len, out_len);
+ if (NS_FAILED(res))
+ return res;
+
+ int len = in_len;
+ err_status_t r = srtp_unprotect(session_, in, &len);
+
+ if (r != err_status_ok) {
+ MOZ_MTLOG(ML_ERROR, "Error unprotecting SRTP packet error=" << (int)r);
+ return NS_ERROR_FAILURE;
+ }
+
+ MOZ_ASSERT(len <= max_len);
+ *out_len = len;
+
+ MOZ_MTLOG(ML_DEBUG, "Successfully unprotected an SRTP packet of len "
+ << *out_len);
+
+ return NS_OK;
+}
+
+nsresult SrtpFlow::ProtectRtcp(void *in, int in_len,
+ int max_len, int *out_len) {
+ nsresult res = CheckInputs(true, in, in_len, max_len, out_len);
+ if (NS_FAILED(res))
+ return res;
+
+ int len = in_len;
+ err_status_t r = srtp_protect_rtcp(session_, in, &len);
+
+ if (r != err_status_ok) {
+ MOZ_MTLOG(ML_ERROR, "Error protecting SRTCP packet");
+ return NS_ERROR_FAILURE;
+ }
+
+ MOZ_ASSERT(len <= max_len);
+ *out_len = len;
+
+ MOZ_MTLOG(ML_DEBUG, "Successfully protected an SRTCP packet of len "
+ << *out_len);
+
+ return NS_OK;
+}
+
+nsresult SrtpFlow::UnprotectRtcp(void *in, int in_len,
+ int max_len, int *out_len) {
+ nsresult res = CheckInputs(false, in, in_len, max_len, out_len);
+ if (NS_FAILED(res))
+ return res;
+
+ int len = in_len;
+ err_status_t r = srtp_unprotect_rtcp(session_, in, &len);
+
+ if (r != err_status_ok) {
+ MOZ_MTLOG(ML_ERROR, "Error unprotecting SRTCP packet error=" << (int)r);
+ return NS_ERROR_FAILURE;
+ }
+
+ MOZ_ASSERT(len <= max_len);
+ *out_len = len;
+
+ MOZ_MTLOG(ML_DEBUG, "Successfully unprotected an SRTCP packet of len "
+ << *out_len);
+
+ return NS_OK;
+}
+
+// Statics
+void SrtpFlow::srtp_event_handler(srtp_event_data_t *data) {
+ // TODO(ekr@rtfm.com): Implement this
+ MOZ_CRASH();
+}
+
+nsresult SrtpFlow::Init() {
+ if (!initialized) {
+ err_status_t r = srtp_init();
+ if (r != err_status_ok) {
+ MOZ_MTLOG(ML_ERROR, "Could not initialize SRTP");
+ MOZ_ASSERT(PR_FALSE);
+ return NS_ERROR_FAILURE;
+ }
+
+ r = srtp_install_event_handler(&SrtpFlow::srtp_event_handler);
+ if (r != err_status_ok) {
+ MOZ_MTLOG(ML_ERROR, "Could not install SRTP event handler");
+ MOZ_ASSERT(PR_FALSE);
+ return NS_ERROR_FAILURE;
+ }
+
+ initialized = true;
+ }
+
+ return NS_OK;
+}
+
+} // end of namespace
+
diff --git a/media/webrtc/signaling/src/mediapipeline/SrtpFlow.h b/media/webrtc/signaling/src/mediapipeline/SrtpFlow.h
new file mode 100644
index 000000000..9bb9c2a67
--- /dev/null
+++ b/media/webrtc/signaling/src/mediapipeline/SrtpFlow.h
@@ -0,0 +1,68 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Original author: ekr@rtfm.com
+
+#ifndef srtpflow_h__
+#define srtpflow_h__
+
+#include "ssl.h"
+#include "sslproto.h"
+#include "mozilla/RefPtr.h"
+#include "nsISupportsImpl.h"
+
+typedef struct srtp_policy_t srtp_policy_t;
+typedef struct srtp_ctx_t *srtp_t;
+typedef struct srtp_event_data_t srtp_event_data_t;
+
+namespace mozilla {
+
+#define SRTP_MASTER_KEY_LENGTH 16
+#define SRTP_MASTER_SALT_LENGTH 14
+#define SRTP_TOTAL_KEY_LENGTH (SRTP_MASTER_KEY_LENGTH + SRTP_MASTER_SALT_LENGTH)
+
+// SRTCP requires an auth tag *plus* a 4-byte index-plus-'E'-bit value (see
+// RFC 3711)
+#define SRTP_MAX_EXPANSION (SRTP_MAX_TRAILER_LEN+4)
+
+
+class SrtpFlow {
+ ~SrtpFlow();
+ public:
+
+
+ static RefPtr<SrtpFlow> Create(int cipher_suite,
+ bool inbound,
+ const void *key,
+ size_t key_len);
+
+ nsresult ProtectRtp(void *in, int in_len,
+ int max_len, int *out_len);
+ nsresult UnprotectRtp(void *in, int in_len,
+ int max_len, int *out_len);
+ nsresult ProtectRtcp(void *in, int in_len,
+ int max_len, int *out_len);
+ nsresult UnprotectRtcp(void *in, int in_len,
+ int max_len, int *out_len);
+
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(SrtpFlow)
+
+ static void srtp_event_handler(srtp_event_data_t *data);
+
+
+ private:
+ SrtpFlow() : session_(nullptr) {}
+
+ nsresult CheckInputs(bool protect, void *in, int in_len,
+ int max_len, int *out_len);
+
+ static nsresult Init();
+ static bool initialized; // Was libsrtp initialized? Only happens once.
+
+ srtp_t session_;
+};
+
+} // End of namespace mozilla
+#endif
+
diff --git a/media/webrtc/signaling/src/peerconnection/MediaPipelineFactory.cpp b/media/webrtc/signaling/src/peerconnection/MediaPipelineFactory.cpp
new file mode 100644
index 000000000..61c2719cd
--- /dev/null
+++ b/media/webrtc/signaling/src/peerconnection/MediaPipelineFactory.cpp
@@ -0,0 +1,1076 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "logging.h"
+#include "nsIGfxInfo.h"
+#include "nsServiceManagerUtils.h"
+
+#include "PeerConnectionImpl.h"
+#include "PeerConnectionMedia.h"
+#include "MediaPipelineFactory.h"
+#include "MediaPipelineFilter.h"
+#include "transportflow.h"
+#include "transportlayer.h"
+#include "transportlayerdtls.h"
+#include "transportlayerice.h"
+
+#include "signaling/src/jsep/JsepTrack.h"
+#include "signaling/src/jsep/JsepTransport.h"
+#include "signaling/src/common/PtrVector.h"
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+#include "MediaStreamTrack.h"
+#include "nsIPrincipal.h"
+#include "nsIDocument.h"
+#include "mozilla/Preferences.h"
+#include "MediaEngine.h"
+#endif
+
+#include "GmpVideoCodec.h"
+#ifdef MOZ_WEBRTC_OMX
+#include "OMXVideoCodec.h"
+#include "OMXCodecWrapper.h"
+#endif
+
+#ifdef MOZ_WEBRTC_MEDIACODEC
+#include "MediaCodecVideoCodec.h"
+#endif
+
+#ifdef MOZILLA_INTERNAL_API
+#include "mozilla/Preferences.h"
+#endif
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+#include "WebrtcGmpVideoCodec.h"
+#endif
+
+#include <stdlib.h>
+
+namespace mozilla {
+
+MOZ_MTLOG_MODULE("MediaPipelineFactory")
+
+static nsresult
+JsepCodecDescToCodecConfig(const JsepCodecDescription& aCodec,
+ AudioCodecConfig** aConfig)
+{
+ MOZ_ASSERT(aCodec.mType == SdpMediaSection::kAudio);
+ if (aCodec.mType != SdpMediaSection::kAudio)
+ return NS_ERROR_INVALID_ARG;
+
+ const JsepAudioCodecDescription& desc =
+ static_cast<const JsepAudioCodecDescription&>(aCodec);
+
+ uint16_t pt;
+
+ if (!desc.GetPtAsInt(&pt)) {
+ MOZ_MTLOG(ML_ERROR, "Invalid payload type: " << desc.mDefaultPt);
+ return NS_ERROR_INVALID_ARG;
+ }
+
+ *aConfig = new AudioCodecConfig(pt,
+ desc.mName,
+ desc.mClock,
+ desc.mPacketSize,
+ desc.mForceMono ? 1 : desc.mChannels,
+ desc.mBitrate,
+ desc.mFECEnabled);
+ (*aConfig)->mMaxPlaybackRate = desc.mMaxPlaybackRate;
+ (*aConfig)->mDtmfEnabled = desc.mDtmfEnabled;
+
+ return NS_OK;
+}
+
+static std::vector<JsepCodecDescription*>
+GetCodecs(const JsepTrackNegotiatedDetails& aDetails)
+{
+ // We do not try to handle cases where a codec is not used on the primary
+ // encoding.
+ if (aDetails.GetEncodingCount()) {
+ return aDetails.GetEncoding(0).GetCodecs();
+ }
+ return std::vector<JsepCodecDescription*>();
+}
+
+static nsresult
+NegotiatedDetailsToAudioCodecConfigs(const JsepTrackNegotiatedDetails& aDetails,
+ PtrVector<AudioCodecConfig>* aConfigs)
+{
+ std::vector<JsepCodecDescription*> codecs(GetCodecs(aDetails));
+ for (const JsepCodecDescription* codec : codecs) {
+ AudioCodecConfig* config;
+ if (NS_FAILED(JsepCodecDescToCodecConfig(*codec, &config))) {
+ return NS_ERROR_INVALID_ARG;
+ }
+ aConfigs->values.push_back(config);
+ }
+ return NS_OK;
+}
+
+static nsresult
+JsepCodecDescToCodecConfig(const JsepCodecDescription& aCodec,
+ VideoCodecConfig** aConfig)
+{
+ MOZ_ASSERT(aCodec.mType == SdpMediaSection::kVideo);
+ if (aCodec.mType != SdpMediaSection::kVideo) {
+ MOZ_ASSERT(false, "JsepCodecDescription has wrong type");
+ return NS_ERROR_INVALID_ARG;
+ }
+
+ const JsepVideoCodecDescription& desc =
+ static_cast<const JsepVideoCodecDescription&>(aCodec);
+
+ uint16_t pt;
+
+ if (!desc.GetPtAsInt(&pt)) {
+ MOZ_MTLOG(ML_ERROR, "Invalid payload type: " << desc.mDefaultPt);
+ return NS_ERROR_INVALID_ARG;
+ }
+
+ UniquePtr<VideoCodecConfigH264> h264Config;
+
+ if (desc.mName == "H264") {
+ h264Config = MakeUnique<VideoCodecConfigH264>();
+ size_t spropSize = sizeof(h264Config->sprop_parameter_sets);
+ strncpy(h264Config->sprop_parameter_sets,
+ desc.mSpropParameterSets.c_str(),
+ spropSize);
+ h264Config->sprop_parameter_sets[spropSize - 1] = '\0';
+ h264Config->packetization_mode = desc.mPacketizationMode;
+ h264Config->profile_level_id = desc.mProfileLevelId;
+ h264Config->tias_bw = 0; // TODO. Issue 165.
+ }
+
+ VideoCodecConfig* configRaw;
+ configRaw = new VideoCodecConfig(
+ pt, desc.mName, desc.mConstraints, h264Config.get());
+
+ configRaw->mAckFbTypes = desc.mAckFbTypes;
+ configRaw->mNackFbTypes = desc.mNackFbTypes;
+ configRaw->mCcmFbTypes = desc.mCcmFbTypes;
+ configRaw->mRembFbSet = desc.RtcpFbRembIsSet();
+ configRaw->mFECFbSet = desc.mFECEnabled;
+
+ *aConfig = configRaw;
+ return NS_OK;
+}
+
+static nsresult
+NegotiatedDetailsToVideoCodecConfigs(const JsepTrackNegotiatedDetails& aDetails,
+ PtrVector<VideoCodecConfig>* aConfigs)
+{
+ std::vector<JsepCodecDescription*> codecs(GetCodecs(aDetails));
+ for (const JsepCodecDescription* codec : codecs) {
+ VideoCodecConfig* config;
+ if (NS_FAILED(JsepCodecDescToCodecConfig(*codec, &config))) {
+ return NS_ERROR_INVALID_ARG;
+ }
+
+ for (size_t i = 0; i < aDetails.GetEncodingCount(); ++i) {
+ const JsepTrackEncoding& jsepEncoding(aDetails.GetEncoding(i));
+ if (jsepEncoding.HasFormat(codec->mDefaultPt)) {
+ VideoCodecConfig::SimulcastEncoding encoding;
+ encoding.rid = jsepEncoding.mRid;
+ encoding.constraints = jsepEncoding.mConstraints;
+ config->mSimulcastEncodings.push_back(encoding);
+ }
+ }
+ aConfigs->values.push_back(config);
+ }
+
+ return NS_OK;
+}
+
+// Accessing the PCMedia should be safe here because we shouldn't
+// have enqueued this function unless it was still active and
+// the ICE data is destroyed on the STS.
+static void
+FinalizeTransportFlow_s(RefPtr<PeerConnectionMedia> aPCMedia,
+ RefPtr<TransportFlow> aFlow, size_t aLevel,
+ bool aIsRtcp,
+ nsAutoPtr<PtrVector<TransportLayer> > aLayerList)
+{
+ TransportLayerIce* ice =
+ static_cast<TransportLayerIce*>(aLayerList->values.front());
+ ice->SetParameters(aPCMedia->ice_ctx(),
+ aPCMedia->ice_media_stream(aLevel),
+ aIsRtcp ? 2 : 1);
+ nsAutoPtr<std::queue<TransportLayer*> > layerQueue(
+ new std::queue<TransportLayer*>);
+ for (auto i = aLayerList->values.begin(); i != aLayerList->values.end();
+ ++i) {
+ layerQueue->push(*i);
+ }
+ aLayerList->values.clear();
+ (void)aFlow->PushLayers(layerQueue); // TODO(bug 854518): Process errors.
+}
+
+static void
+AddNewIceStreamForRestart_s(RefPtr<PeerConnectionMedia> aPCMedia,
+ RefPtr<TransportFlow> aFlow,
+ size_t aLevel,
+ bool aIsRtcp)
+{
+ TransportLayerIce* ice =
+ static_cast<TransportLayerIce*>(aFlow->GetLayer("ice"));
+ ice->SetParameters(aPCMedia->ice_ctx(),
+ aPCMedia->ice_media_stream(aLevel),
+ aIsRtcp ? 2 : 1);
+}
+
+nsresult
+MediaPipelineFactory::CreateOrGetTransportFlow(
+ size_t aLevel,
+ bool aIsRtcp,
+ const JsepTransport& aTransport,
+ RefPtr<TransportFlow>* aFlowOutparam)
+{
+ nsresult rv;
+ RefPtr<TransportFlow> flow;
+
+ flow = mPCMedia->GetTransportFlow(aLevel, aIsRtcp);
+ if (flow) {
+ if (mPCMedia->IsIceRestarting()) {
+ MOZ_MTLOG(ML_INFO, "Flow[" << flow->id() << "]: "
+ << "detected ICE restart - level: "
+ << aLevel << " rtcp: " << aIsRtcp);
+
+ rv = mPCMedia->GetSTSThread()->Dispatch(
+ WrapRunnableNM(AddNewIceStreamForRestart_s,
+ mPCMedia, flow, aLevel, aIsRtcp),
+ NS_DISPATCH_NORMAL);
+ if (NS_FAILED(rv)) {
+ MOZ_MTLOG(ML_ERROR, "Failed to dispatch AddNewIceStreamForRestart_s");
+ return rv;
+ }
+ }
+
+ *aFlowOutparam = flow;
+ return NS_OK;
+ }
+
+ std::ostringstream osId;
+ osId << mPC->GetHandle() << ":" << aLevel << ","
+ << (aIsRtcp ? "rtcp" : "rtp");
+ flow = new TransportFlow(osId.str());
+
+ // The media streams are made on STS so we need to defer setup.
+ auto ice = MakeUnique<TransportLayerIce>(mPC->GetHandle());
+ auto dtls = MakeUnique<TransportLayerDtls>();
+ dtls->SetRole(aTransport.mDtls->GetRole() ==
+ JsepDtlsTransport::kJsepDtlsClient
+ ? TransportLayerDtls::CLIENT
+ : TransportLayerDtls::SERVER);
+
+ RefPtr<DtlsIdentity> pcid = mPC->Identity();
+ if (!pcid) {
+ MOZ_MTLOG(ML_ERROR, "Failed to get DTLS identity.");
+ return NS_ERROR_FAILURE;
+ }
+ dtls->SetIdentity(pcid);
+
+ const SdpFingerprintAttributeList& fingerprints =
+ aTransport.mDtls->GetFingerprints();
+ for (auto fp = fingerprints.mFingerprints.begin();
+ fp != fingerprints.mFingerprints.end();
+ ++fp) {
+ std::ostringstream ss;
+ ss << fp->hashFunc;
+ rv = dtls->SetVerificationDigest(ss.str(), &fp->fingerprint[0],
+ fp->fingerprint.size());
+ if (NS_FAILED(rv)) {
+ MOZ_MTLOG(ML_ERROR, "Could not set fingerprint");
+ return rv;
+ }
+ }
+
+ std::vector<uint16_t> srtpCiphers;
+ srtpCiphers.push_back(SRTP_AES128_CM_HMAC_SHA1_80);
+ srtpCiphers.push_back(SRTP_AES128_CM_HMAC_SHA1_32);
+
+ rv = dtls->SetSrtpCiphers(srtpCiphers);
+ if (NS_FAILED(rv)) {
+ MOZ_MTLOG(ML_ERROR, "Couldn't set SRTP ciphers");
+ return rv;
+ }
+
+ // Always permits negotiation of the confidential mode.
+ // Only allow non-confidential (which is an allowed default),
+ // if we aren't confidential.
+ std::set<std::string> alpn;
+ std::string alpnDefault = "";
+ alpn.insert("c-webrtc");
+ if (!mPC->PrivacyRequested()) {
+ alpnDefault = "webrtc";
+ alpn.insert(alpnDefault);
+ }
+ rv = dtls->SetAlpn(alpn, alpnDefault);
+ if (NS_FAILED(rv)) {
+ MOZ_MTLOG(ML_ERROR, "Couldn't set ALPN");
+ return rv;
+ }
+
+ nsAutoPtr<PtrVector<TransportLayer> > layers(new PtrVector<TransportLayer>);
+ layers->values.push_back(ice.release());
+ layers->values.push_back(dtls.release());
+
+ rv = mPCMedia->GetSTSThread()->Dispatch(
+ WrapRunnableNM(FinalizeTransportFlow_s, mPCMedia, flow, aLevel, aIsRtcp,
+ layers),
+ NS_DISPATCH_NORMAL);
+ if (NS_FAILED(rv)) {
+ MOZ_MTLOG(ML_ERROR, "Failed to dispatch FinalizeTransportFlow_s");
+ return rv;
+ }
+
+ mPCMedia->AddTransportFlow(aLevel, aIsRtcp, flow);
+
+ *aFlowOutparam = flow;
+
+ return NS_OK;
+}
+
+nsresult
+MediaPipelineFactory::GetTransportParameters(
+ const JsepTrackPair& aTrackPair,
+ const JsepTrack& aTrack,
+ size_t* aLevelOut,
+ RefPtr<TransportFlow>* aRtpOut,
+ RefPtr<TransportFlow>* aRtcpOut,
+ nsAutoPtr<MediaPipelineFilter>* aFilterOut)
+{
+ *aLevelOut = aTrackPair.mLevel;
+
+ size_t transportLevel = aTrackPair.mBundleLevel.isSome() ?
+ *aTrackPair.mBundleLevel :
+ aTrackPair.mLevel;
+
+ nsresult rv = CreateOrGetTransportFlow(
+ transportLevel, false, *aTrackPair.mRtpTransport, aRtpOut);
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+ MOZ_ASSERT(aRtpOut);
+
+ if (aTrackPair.mRtcpTransport) {
+ rv = CreateOrGetTransportFlow(
+ transportLevel, true, *aTrackPair.mRtcpTransport, aRtcpOut);
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+ MOZ_ASSERT(aRtcpOut);
+ }
+
+ if (aTrackPair.mBundleLevel.isSome()) {
+ bool receiving = aTrack.GetDirection() == sdp::kRecv;
+
+ *aFilterOut = new MediaPipelineFilter;
+
+ if (receiving) {
+ // Add remote SSRCs so we can distinguish which RTP packets actually
+ // belong to this pipeline (also RTCP sender reports).
+ for (auto i = aTrack.GetSsrcs().begin();
+ i != aTrack.GetSsrcs().end(); ++i) {
+ (*aFilterOut)->AddRemoteSSRC(*i);
+ }
+
+ // TODO(bug 1105005): Tell the filter about the mid for this track
+
+ // Add unique payload types as a last-ditch fallback
+ auto uniquePts = aTrack.GetNegotiatedDetails()->GetUniquePayloadTypes();
+ for (auto i = uniquePts.begin(); i != uniquePts.end(); ++i) {
+ (*aFilterOut)->AddUniquePT(*i);
+ }
+ }
+ }
+
+ return NS_OK;
+}
+
+nsresult
+MediaPipelineFactory::CreateOrUpdateMediaPipeline(
+ const JsepTrackPair& aTrackPair,
+ const JsepTrack& aTrack)
+{
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+ // The GMP code is all the way on the other side of webrtc.org, and it is not
+ // feasible to plumb this information all the way through. So, we set it (for
+ // the duration of this call) in a global variable. This allows the GMP code
+ // to report errors to the PC.
+ WebrtcGmpPCHandleSetter setter(mPC->GetHandle());
+#endif
+
+ MOZ_ASSERT(aTrackPair.mRtpTransport);
+
+ bool receiving = aTrack.GetDirection() == sdp::kRecv;
+
+ size_t level;
+ RefPtr<TransportFlow> rtpFlow;
+ RefPtr<TransportFlow> rtcpFlow;
+ nsAutoPtr<MediaPipelineFilter> filter;
+
+ nsresult rv = GetTransportParameters(aTrackPair,
+ aTrack,
+ &level,
+ &rtpFlow,
+ &rtcpFlow,
+ &filter);
+ if (NS_FAILED(rv)) {
+ MOZ_MTLOG(ML_ERROR, "Failed to get transport parameters for pipeline, rv="
+ << static_cast<unsigned>(rv));
+ return rv;
+ }
+
+ if (aTrack.GetMediaType() == SdpMediaSection::kApplication) {
+ // GetTransportParameters has already done everything we need for
+ // datachannel.
+ return NS_OK;
+ }
+
+ // Find the stream we need
+ SourceStreamInfo* stream;
+ if (receiving) {
+ stream = mPCMedia->GetRemoteStreamById(aTrack.GetStreamId());
+ } else {
+ stream = mPCMedia->GetLocalStreamById(aTrack.GetStreamId());
+ }
+
+ if (!stream) {
+ MOZ_MTLOG(ML_ERROR, "Negotiated " << (receiving ? "recv" : "send")
+ << " stream id " << aTrack.GetStreamId() << " was never added");
+ MOZ_ASSERT(false);
+ return NS_ERROR_FAILURE;
+ }
+
+ if (!stream->HasTrack(aTrack.GetTrackId())) {
+ MOZ_MTLOG(ML_ERROR, "Negotiated " << (receiving ? "recv" : "send")
+ << " track id " << aTrack.GetTrackId() << " was never added");
+ MOZ_ASSERT(false);
+ return NS_ERROR_FAILURE;
+ }
+
+ RefPtr<MediaSessionConduit> conduit;
+ if (aTrack.GetMediaType() == SdpMediaSection::kAudio) {
+ rv = GetOrCreateAudioConduit(aTrackPair, aTrack, &conduit);
+ if (NS_FAILED(rv))
+ return rv;
+ } else if (aTrack.GetMediaType() == SdpMediaSection::kVideo) {
+ rv = GetOrCreateVideoConduit(aTrackPair, aTrack, &conduit);
+ if (NS_FAILED(rv))
+ return rv;
+ } else {
+ // We've created the TransportFlow, nothing else to do here.
+ return NS_OK;
+ }
+
+ if (aTrack.GetActive()) {
+ if (receiving) {
+ auto error = conduit->StartReceiving();
+ if (error) {
+ MOZ_MTLOG(ML_ERROR, "StartReceiving failed: " << error);
+ return NS_ERROR_FAILURE;
+ }
+ } else {
+ auto error = conduit->StartTransmitting();
+ if (error) {
+ MOZ_MTLOG(ML_ERROR, "StartTransmitting failed: " << error);
+ return NS_ERROR_FAILURE;
+ }
+ }
+ } else {
+ if (receiving) {
+ auto error = conduit->StopReceiving();
+ if (error) {
+ MOZ_MTLOG(ML_ERROR, "StopReceiving failed: " << error);
+ return NS_ERROR_FAILURE;
+ }
+ } else {
+ auto error = conduit->StopTransmitting();
+ if (error) {
+ MOZ_MTLOG(ML_ERROR, "StopTransmitting failed: " << error);
+ return NS_ERROR_FAILURE;
+ }
+ }
+ }
+
+ RefPtr<MediaPipeline> pipeline =
+ stream->GetPipelineByTrackId_m(aTrack.GetTrackId());
+
+ if (pipeline && pipeline->level() != static_cast<int>(level)) {
+ MOZ_MTLOG(ML_WARNING, "Track " << aTrack.GetTrackId() <<
+ " has moved from level " << pipeline->level() <<
+ " to level " << level <<
+ ". This requires re-creating the MediaPipeline.");
+ RefPtr<dom::MediaStreamTrack> domTrack =
+ stream->GetTrackById(aTrack.GetTrackId());
+ MOZ_ASSERT(domTrack, "MediaPipeline existed for a track, but no MediaStreamTrack");
+
+ // Since we do not support changing the conduit on a pre-existing
+ // MediaPipeline
+ pipeline = nullptr;
+ stream->RemoveTrack(aTrack.GetTrackId());
+ stream->AddTrack(aTrack.GetTrackId(), domTrack);
+ }
+
+ if (pipeline) {
+ pipeline->UpdateTransport_m(level, rtpFlow, rtcpFlow, filter);
+ return NS_OK;
+ }
+
+ MOZ_MTLOG(ML_DEBUG,
+ "Creating media pipeline"
+ << " m-line index=" << aTrackPair.mLevel
+ << " type=" << aTrack.GetMediaType()
+ << " direction=" << aTrack.GetDirection());
+
+ if (receiving) {
+ rv = CreateMediaPipelineReceiving(aTrackPair, aTrack,
+ level, rtpFlow, rtcpFlow, filter,
+ conduit);
+ if (NS_FAILED(rv))
+ return rv;
+ } else {
+ rv = CreateMediaPipelineSending(aTrackPair, aTrack,
+ level, rtpFlow, rtcpFlow, filter,
+ conduit);
+ if (NS_FAILED(rv))
+ return rv;
+ }
+
+ return NS_OK;
+}
+
+nsresult
+MediaPipelineFactory::CreateMediaPipelineReceiving(
+ const JsepTrackPair& aTrackPair,
+ const JsepTrack& aTrack,
+ size_t aLevel,
+ RefPtr<TransportFlow> aRtpFlow,
+ RefPtr<TransportFlow> aRtcpFlow,
+ nsAutoPtr<MediaPipelineFilter> aFilter,
+ const RefPtr<MediaSessionConduit>& aConduit)
+{
+ // We will error out earlier if this isn't here.
+ RefPtr<RemoteSourceStreamInfo> stream =
+ mPCMedia->GetRemoteStreamById(aTrack.GetStreamId());
+
+ RefPtr<MediaPipelineReceive> pipeline;
+
+ TrackID numericTrackId = stream->GetNumericTrackId(aTrack.GetTrackId());
+ MOZ_ASSERT(IsTrackIDExplicit(numericTrackId));
+
+ MOZ_MTLOG(ML_DEBUG, __FUNCTION__ << ": Creating pipeline for "
+ << numericTrackId << " -> " << aTrack.GetTrackId());
+
+ if (aTrack.GetMediaType() == SdpMediaSection::kAudio) {
+ pipeline = new MediaPipelineReceiveAudio(
+ mPC->GetHandle(),
+ mPC->GetMainThread().get(),
+ mPC->GetSTSThread(),
+ stream->GetMediaStream()->GetInputStream()->AsSourceStream(),
+ aTrack.GetTrackId(),
+ numericTrackId,
+ aLevel,
+ static_cast<AudioSessionConduit*>(aConduit.get()), // Ugly downcast.
+ aRtpFlow,
+ aRtcpFlow,
+ aFilter);
+ } else if (aTrack.GetMediaType() == SdpMediaSection::kVideo) {
+ pipeline = new MediaPipelineReceiveVideo(
+ mPC->GetHandle(),
+ mPC->GetMainThread().get(),
+ mPC->GetSTSThread(),
+ stream->GetMediaStream()->GetInputStream()->AsSourceStream(),
+ aTrack.GetTrackId(),
+ numericTrackId,
+ aLevel,
+ static_cast<VideoSessionConduit*>(aConduit.get()), // Ugly downcast.
+ aRtpFlow,
+ aRtcpFlow,
+ aFilter);
+ } else {
+ MOZ_ASSERT(false);
+ MOZ_MTLOG(ML_ERROR, "Invalid media type in CreateMediaPipelineReceiving");
+ return NS_ERROR_FAILURE;
+ }
+
+ nsresult rv = pipeline->Init();
+ if (NS_FAILED(rv)) {
+ MOZ_MTLOG(ML_ERROR, "Couldn't initialize receiving pipeline");
+ return rv;
+ }
+
+ rv = stream->StorePipeline(aTrack.GetTrackId(),
+ RefPtr<MediaPipeline>(pipeline));
+ if (NS_FAILED(rv)) {
+ MOZ_MTLOG(ML_ERROR, "Couldn't store receiving pipeline " <<
+ static_cast<unsigned>(rv));
+ return rv;
+ }
+
+ stream->SyncPipeline(pipeline);
+
+ return NS_OK;
+}
+
+nsresult
+MediaPipelineFactory::CreateMediaPipelineSending(
+ const JsepTrackPair& aTrackPair,
+ const JsepTrack& aTrack,
+ size_t aLevel,
+ RefPtr<TransportFlow> aRtpFlow,
+ RefPtr<TransportFlow> aRtcpFlow,
+ nsAutoPtr<MediaPipelineFilter> aFilter,
+ const RefPtr<MediaSessionConduit>& aConduit)
+{
+ nsresult rv;
+
+ // This is checked earlier
+ RefPtr<LocalSourceStreamInfo> stream =
+ mPCMedia->GetLocalStreamById(aTrack.GetStreamId());
+
+ dom::MediaStreamTrack* track =
+ stream->GetTrackById(aTrack.GetTrackId());
+ MOZ_ASSERT(track);
+
+ // Now we have all the pieces, create the pipeline
+ RefPtr<MediaPipelineTransmit> pipeline = new MediaPipelineTransmit(
+ mPC->GetHandle(),
+ mPC->GetMainThread().get(),
+ mPC->GetSTSThread(),
+ track,
+ aTrack.GetTrackId(),
+ aLevel,
+ aConduit,
+ aRtpFlow,
+ aRtcpFlow,
+ aFilter);
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+ // implement checking for peerIdentity (where failure == black/silence)
+ nsIDocument* doc = mPC->GetWindow()->GetExtantDoc();
+ if (doc) {
+ pipeline->UpdateSinkIdentity_m(track,
+ doc->NodePrincipal(),
+ mPC->GetPeerIdentity());
+ } else {
+ MOZ_MTLOG(ML_ERROR, "Cannot initialize pipeline without attached doc");
+ return NS_ERROR_FAILURE; // Don't remove this till we know it's safe.
+ }
+#endif
+
+ rv = pipeline->Init();
+ if (NS_FAILED(rv)) {
+ MOZ_MTLOG(ML_ERROR, "Couldn't initialize sending pipeline");
+ return rv;
+ }
+
+ rv = stream->StorePipeline(aTrack.GetTrackId(),
+ RefPtr<MediaPipeline>(pipeline));
+ if (NS_FAILED(rv)) {
+    MOZ_MTLOG(ML_ERROR, "Couldn't store sending pipeline " <<
+              static_cast<unsigned>(rv));
+ return rv;
+ }
+
+ return NS_OK;
+}
+
+/*
+ * Fetches the AudioSessionConduit for this track pair's m-line level,
+ * creating one (and registering it with PeerConnectionMedia) when absent,
+ * then applies the negotiated codec configuration for send or receive.
+ * On success the conduit is returned through *aConduitp.
+ */
+nsresult
+MediaPipelineFactory::GetOrCreateAudioConduit(
+    const JsepTrackPair& aTrackPair,
+    const JsepTrack& aTrack,
+    RefPtr<MediaSessionConduit>* aConduitp)
+{
+
+  if (!aTrack.GetNegotiatedDetails()) {
+    MOZ_ASSERT(false, "Track is missing negotiated details");
+    return NS_ERROR_INVALID_ARG;
+  }
+
+  bool receiving = aTrack.GetDirection() == sdp::kRecv;
+
+  // Reuse an existing conduit for this level if one was created earlier.
+  RefPtr<AudioSessionConduit> conduit =
+    mPCMedia->GetAudioConduit(aTrackPair.mLevel);
+
+  if (!conduit) {
+    conduit = AudioSessionConduit::Create();
+    if (!conduit) {
+      MOZ_MTLOG(ML_ERROR, "Could not create audio conduit");
+      return NS_ERROR_FAILURE;
+    }
+
+    mPCMedia->AddAudioConduit(aTrackPair.mLevel, conduit);
+  }
+
+  // Translate the negotiated JSEP codec descriptions into conduit configs.
+  PtrVector<AudioCodecConfig> configs;
+  nsresult rv = NegotiatedDetailsToAudioCodecConfigs(
+      *aTrack.GetNegotiatedDetails(), &configs);
+
+  if (NS_FAILED(rv)) {
+    MOZ_MTLOG(ML_ERROR, "Failed to convert JsepCodecDescriptions to "
+              "AudioCodecConfigs.");
+    return rv;
+  }
+
+  if (configs.values.empty()) {
+    MOZ_MTLOG(ML_ERROR, "Can't set up a conduit with 0 codecs");
+    return NS_ERROR_FAILURE;
+  }
+
+  if (receiving) {
+    auto error = conduit->ConfigureRecvMediaCodecs(configs.values);
+
+    if (error) {
+      MOZ_MTLOG(ML_ERROR, "ConfigureRecvMediaCodecs failed: " << error);
+      return NS_ERROR_FAILURE;
+    }
+
+    if (!aTrackPair.mSending) {
+      // No send track, but we still need to configure an SSRC for receiver
+      // reports.
+      if (!conduit->SetLocalSSRC(aTrackPair.mRecvonlySsrc)) {
+        MOZ_MTLOG(ML_ERROR, "SetLocalSSRC failed");
+        return NS_ERROR_FAILURE;
+      }
+    }
+  } else {
+    // For now we only expect to have one ssrc per local track.
+    auto ssrcs = aTrack.GetSsrcs();
+    if (!ssrcs.empty()) {
+      if (!conduit->SetLocalSSRC(ssrcs.front())) {
+        MOZ_MTLOG(ML_ERROR, "SetLocalSSRC failed");
+        return NS_ERROR_FAILURE;
+      }
+    }
+
+    conduit->SetLocalCNAME(aTrack.GetCNAME().c_str());
+
+    if (configs.values.size() > 1
+        && configs.values.back()->mName == "telephone-event") {
+      // we have a telephone event codec, so we need to make sure
+      // the dynamic pt is set properly
+      conduit->SetDtmfPayloadType(configs.values.back()->mType);
+    }
+
+    // Only the first (most-preferred) codec config is used for sending.
+    auto error = conduit->ConfigureSendMediaCodec(configs.values[0]);
+    if (error) {
+      MOZ_MTLOG(ML_ERROR, "ConfigureSendMediaCodec failed: " << error);
+      return NS_ERROR_FAILURE;
+    }
+
+    // ssrc-audio-level RTP header extension (RFC 6464), if negotiated.
+    const SdpExtmapAttributeList::Extmap* audioLevelExt =
+        aTrack.GetNegotiatedDetails()->GetExt(
+            "urn:ietf:params:rtp-hdrext:ssrc-audio-level");
+
+    if (audioLevelExt) {
+      MOZ_MTLOG(ML_DEBUG, "Calling EnableAudioLevelExtension");
+      error = conduit->EnableAudioLevelExtension(true, audioLevelExt->entry);
+
+      if (error) {
+        MOZ_MTLOG(ML_ERROR, "EnableAudioLevelExtension failed: " << error);
+        return NS_ERROR_FAILURE;
+      }
+    }
+  }
+
+  *aConduitp = conduit;
+
+  return NS_OK;
+}
+
+/*
+ * Fetches the VideoSessionConduit for this track pair's m-line level,
+ * creating one (and registering it with PeerConnectionMedia) when absent,
+ * then applies the negotiated codec configuration for send or receive.
+ * On success the conduit is returned through *aConduitp.
+ *
+ * Fix: the result of ConfigureSendMediaCodec is now checked immediately
+ * after the call. Previously the error code was only examined after
+ * EnableRTPStreamIdExtension, which could overwrite it with success and
+ * silently mask a send-codec configuration failure.
+ */
+nsresult
+MediaPipelineFactory::GetOrCreateVideoConduit(
+    const JsepTrackPair& aTrackPair,
+    const JsepTrack& aTrack,
+    RefPtr<MediaSessionConduit>* aConduitp)
+{
+
+  if (!aTrack.GetNegotiatedDetails()) {
+    MOZ_ASSERT(false, "Track is missing negotiated details");
+    return NS_ERROR_INVALID_ARG;
+  }
+
+  bool receiving = aTrack.GetDirection() == sdp::kRecv;
+
+  // Reuse an existing conduit for this level if one was created earlier.
+  RefPtr<VideoSessionConduit> conduit =
+    mPCMedia->GetVideoConduit(aTrackPair.mLevel);
+
+  if (!conduit) {
+    conduit = VideoSessionConduit::Create();
+    if (!conduit) {
+      MOZ_MTLOG(ML_ERROR, "Could not create video conduit");
+      return NS_ERROR_FAILURE;
+    }
+
+    mPCMedia->AddVideoConduit(aTrackPair.mLevel, conduit);
+  }
+
+  // Translate the negotiated JSEP codec descriptions into conduit configs.
+  PtrVector<VideoCodecConfig> configs;
+  nsresult rv = NegotiatedDetailsToVideoCodecConfigs(
+      *aTrack.GetNegotiatedDetails(), &configs);
+
+  if (NS_FAILED(rv)) {
+    MOZ_MTLOG(ML_ERROR, "Failed to convert JsepCodecDescriptions to "
+              "VideoCodecConfigs.");
+    return rv;
+  }
+
+  if (configs.values.empty()) {
+    MOZ_MTLOG(ML_ERROR, "Can't set up a conduit with 0 codecs");
+    return NS_ERROR_FAILURE;
+  }
+
+  if (receiving) {
+    // A local SSRC is required for RTCP receiver reports even when we
+    // are not sending media on this m-line.
+    if (aTrackPair.mSending) {
+      auto ssrcs = &aTrackPair.mSending->GetSsrcs();
+      if (!ssrcs->empty()) {
+        if (!conduit->SetLocalSSRC(ssrcs->front())) {
+          MOZ_MTLOG(ML_ERROR, "SetLocalSSRC failed(1)");
+          return NS_ERROR_FAILURE;
+        }
+      } else {
+        MOZ_MTLOG(ML_ERROR, "Sending without an SSRC??");
+        return NS_ERROR_FAILURE;
+      }
+    } else {
+      // No send track, but we still need to configure an SSRC for receiver
+      // reports.
+      if (!conduit->SetLocalSSRC(aTrackPair.mRecvonlySsrc)) {
+        MOZ_MTLOG(ML_ERROR, "SetLocalSSRC failed(2)");
+        return NS_ERROR_FAILURE;
+      }
+    }
+
+    // Prune out stuff we cannot actually do. We should work to eliminate the
+    // need for this.
+    bool configuredH264 = false;
+    for (size_t i = 0; i < configs.values.size();) {
+      // TODO(bug 1200768): We can only handle configuring one recv H264 codec
+      if (configuredH264 && (configs.values[i]->mName == "H264")) {
+        delete configs.values[i];
+        configs.values.erase(configs.values.begin() + i);
+        continue;
+      }
+
+      // TODO(bug 1018791): This really should be checked sooner
+      if (EnsureExternalCodec(*conduit, configs.values[i], false)) {
+        delete configs.values[i];
+        configs.values.erase(configs.values.begin() + i);
+        continue;
+      }
+
+      if (configs.values[i]->mName == "H264") {
+        configuredH264 = true;
+      }
+      ++i;
+    }
+
+    auto error = conduit->ConfigureRecvMediaCodecs(configs.values);
+
+    if (error) {
+      MOZ_MTLOG(ML_ERROR, "ConfigureRecvMediaCodecs failed: " << error);
+      return NS_ERROR_FAILURE;
+    }
+  } else {
+    // For now we only expect to have one ssrc per local track.
+    auto ssrcs = aTrack.GetSsrcs();
+    if (!ssrcs.empty()) {
+      if (!conduit->SetLocalSSRC(ssrcs.front())) {
+        MOZ_MTLOG(ML_ERROR, "SetLocalSSRC failed");
+        return NS_ERROR_FAILURE;
+      }
+    }
+
+    conduit->SetLocalCNAME(aTrack.GetCNAME().c_str());
+
+    rv = ConfigureVideoCodecMode(aTrack, *conduit);
+    if (NS_FAILED(rv)) {
+      return rv;
+    }
+
+    // TODO(bug 1018791): This really should be checked sooner
+    if (EnsureExternalCodec(*conduit, configs.values[0], true)) {
+      MOZ_MTLOG(ML_ERROR, "External codec not available");
+      return NS_ERROR_FAILURE;
+    }
+
+    // Only the first (most-preferred) codec config is used for sending.
+    // Check the result immediately so a later extension call cannot
+    // overwrite (and mask) a failure here.
+    auto error = conduit->ConfigureSendMediaCodec(configs.values[0]);
+    if (error) {
+      MOZ_MTLOG(ML_ERROR, "ConfigureSendMediaCodec failed: " << error);
+      return NS_ERROR_FAILURE;
+    }
+
+    // rtp-stream-id (RID) RTP header extension, if negotiated.
+    const SdpExtmapAttributeList::Extmap* rtpStreamIdExt =
+        aTrack.GetNegotiatedDetails()->GetExt(
+            "urn:ietf:params:rtp-hdrext:sdes:rtp-stream-id");
+
+    if (rtpStreamIdExt) {
+      MOZ_MTLOG(ML_DEBUG, "Calling EnableRTPSenderIdExtension");
+      error = conduit->EnableRTPStreamIdExtension(true, rtpStreamIdExt->entry);
+
+      if (error) {
+        MOZ_MTLOG(ML_ERROR, "EnableRTPSenderIdExtension failed: " << error);
+        return NS_ERROR_FAILURE;
+      }
+    }
+  }
+
+  *aConduitp = conduit;
+
+  return NS_OK;
+}
+
+/*
+ * Chooses the video codec mode (realtime vs. screensharing) based on the
+ * media source of the local video track, and applies it to the conduit.
+ * Compiled out under external linkage, where the DOM track types are
+ * unavailable; in that build this is a no-op that returns NS_OK.
+ *
+ * Fix: null-check the stream and track lookups before dereferencing.
+ * Previously only |videotrack| was checked, so a track id that no longer
+ * mapped to a local stream (or a stream missing the track) crashed here.
+ */
+nsresult
+MediaPipelineFactory::ConfigureVideoCodecMode(const JsepTrack& aTrack,
+                                              VideoSessionConduit& aConduit)
+{
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+  RefPtr<LocalSourceStreamInfo> stream =
+    mPCMedia->GetLocalStreamByTrackId(aTrack.GetTrackId());
+  if (!stream) {
+    MOZ_MTLOG(ML_ERROR, "Could not find local stream for track");
+    return NS_ERROR_FAILURE;
+  }
+
+  // get video track
+  RefPtr<mozilla::dom::MediaStreamTrack> track =
+    stream->GetTrackById(aTrack.GetTrackId());
+  if (!track) {
+    MOZ_MTLOG(ML_ERROR, "Could not find track in stream");
+    return NS_ERROR_FAILURE;
+  }
+
+  RefPtr<mozilla::dom::VideoStreamTrack> videotrack =
+    track->AsVideoStreamTrack();
+
+  if (!videotrack) {
+    MOZ_MTLOG(ML_ERROR, "video track not available");
+    return NS_ERROR_FAILURE;
+  }
+
+  // Screen-capture style sources get the screensharing encoder tuning;
+  // everything else (camera, default) is treated as realtime video.
+  dom::MediaSourceEnum source = videotrack->GetSource().GetMediaSource();
+  webrtc::VideoCodecMode mode = webrtc::kRealtimeVideo;
+  switch (source) {
+    case dom::MediaSourceEnum::Browser:
+    case dom::MediaSourceEnum::Screen:
+    case dom::MediaSourceEnum::Application:
+    case dom::MediaSourceEnum::Window:
+      mode = webrtc::kScreensharing;
+      break;
+
+    case dom::MediaSourceEnum::Camera:
+    default:
+      mode = webrtc::kRealtimeVideo;
+      break;
+  }
+
+  auto error = aConduit.ConfigureCodecMode(mode);
+  if (error) {
+    MOZ_MTLOG(ML_ERROR, "ConfigureCodecMode failed: " << error);
+    return NS_ERROR_FAILURE;
+  }
+
+#endif
+  return NS_OK;
+}
+
+/*
+ * Add external H.264 video codec.
+ *
+ * Registers platform/external encoders or decoders with the conduit for
+ * codecs that are not handled internally:
+ *  - VP8: optionally uses hardware MediaCodec acceleration (Android,
+ *    MOZ_WEBRTC_MEDIACODEC builds) when the relevant pref is set and the
+ *    hardware is whitelisted by gfxInfo; otherwise no-op.
+ *  - VP9: always handled internally; no-op.
+ *  - H264: registers an OMX codec (MOZ_WEBRTC_OMX builds) or a GMP codec,
+ *    unless a codec plugin is already attached to the conduit.
+ * Any other codec name is rejected with the appropriate error code.
+ */
+MediaConduitErrorCode
+MediaPipelineFactory::EnsureExternalCodec(VideoSessionConduit& aConduit,
+                                          VideoCodecConfig* aConfig,
+                                          bool aIsSend)
+{
+  if (aConfig->mName == "VP8") {
+#ifdef MOZ_WEBRTC_MEDIACODEC
+    if (aIsSend) {
+#ifdef MOZILLA_INTERNAL_API
+      bool enabled = mozilla::Preferences::GetBool("media.navigator.hardware.vp8_encode.acceleration_enabled", false);
+#else
+      bool enabled = false;
+#endif
+      if (enabled) {
+        nsCOMPtr<nsIGfxInfo> gfxInfo = do_GetService("@mozilla.org/gfx/info;1");
+        if (gfxInfo) {
+          int32_t status;
+          nsCString discardFailureId;
+          if (NS_SUCCEEDED(gfxInfo->GetFeatureStatus(nsIGfxInfo::FEATURE_WEBRTC_HW_ACCELERATION_ENCODE, discardFailureId, &status))) {
+            if (status != nsIGfxInfo::FEATURE_STATUS_OK) {
+              NS_WARNING("VP8 encoder hardware is not whitelisted: disabling.\n");
+            } else {
+              VideoEncoder* encoder = nullptr;
+              encoder = MediaCodecVideoCodec::CreateEncoder(MediaCodecVideoCodec::CodecType::CODEC_VP8);
+              if (encoder) {
+                return aConduit.SetExternalSendCodec(aConfig, encoder);
+              }
+              // Encoder creation failed; fall back to the internal codec.
+              return kMediaConduitNoError;
+            }
+          }
+        }
+      }
+    } else {
+#ifdef MOZILLA_INTERNAL_API
+      bool enabled = mozilla::Preferences::GetBool("media.navigator.hardware.vp8_decode.acceleration_enabled", false);
+#else
+      bool enabled = false;
+#endif
+      if (enabled) {
+        nsCOMPtr<nsIGfxInfo> gfxInfo = do_GetService("@mozilla.org/gfx/info;1");
+        if (gfxInfo) {
+          int32_t status;
+          nsCString discardFailureId;
+          if (NS_SUCCEEDED(gfxInfo->GetFeatureStatus(nsIGfxInfo::FEATURE_WEBRTC_HW_ACCELERATION_DECODE, discardFailureId, &status))) {
+            if (status != nsIGfxInfo::FEATURE_STATUS_OK) {
+              NS_WARNING("VP8 decoder hardware is not whitelisted: disabling.\n");
+            } else {
+              VideoDecoder* decoder;
+              decoder = MediaCodecVideoCodec::CreateDecoder(MediaCodecVideoCodec::CodecType::CODEC_VP8);
+              if (decoder) {
+                return aConduit.SetExternalRecvCodec(aConfig, decoder);
+              }
+              // Decoder creation failed; fall back to the internal codec.
+              return kMediaConduitNoError;
+            }
+          }
+        }
+      }
+    }
+#endif
+    return kMediaConduitNoError;
+  }
+  if (aConfig->mName == "VP9") {
+    return kMediaConduitNoError;
+  }
+  if (aConfig->mName == "H264") {
+    // A non-zero plugin id means an external H.264 codec is already attached.
+    if (aConduit.CodecPluginID() != 0) {
+      return kMediaConduitNoError;
+    }
+    // Register H.264 codec.
+    if (aIsSend) {
+      VideoEncoder* encoder = nullptr;
+#ifdef MOZ_WEBRTC_OMX
+      encoder =
+          OMXVideoCodec::CreateEncoder(OMXVideoCodec::CodecType::CODEC_H264);
+#else
+      encoder = GmpVideoCodec::CreateEncoder();
+#endif
+      if (encoder) {
+        return aConduit.SetExternalSendCodec(aConfig, encoder);
+      }
+      return kMediaConduitInvalidSendCodec;
+    }
+    VideoDecoder* decoder = nullptr;
+#ifdef MOZ_WEBRTC_OMX
+    decoder =
+        OMXVideoCodec::CreateDecoder(OMXVideoCodec::CodecType::CODEC_H264);
+#else
+    decoder = GmpVideoCodec::CreateDecoder();
+#endif
+    if (decoder) {
+      return aConduit.SetExternalRecvCodec(aConfig, decoder);
+    }
+    return kMediaConduitInvalidReceiveCodec;
+  }
+  MOZ_MTLOG(ML_ERROR,
+            "Invalid video codec configured: " << aConfig->mName.c_str());
+  return aIsSend ? kMediaConduitInvalidSendCodec
+                 : kMediaConduitInvalidReceiveCodec;
+}
+
+} // namespace mozilla
diff --git a/media/webrtc/signaling/src/peerconnection/MediaPipelineFactory.h b/media/webrtc/signaling/src/peerconnection/MediaPipelineFactory.h
new file mode 100644
index 000000000..972c4368a
--- /dev/null
+++ b/media/webrtc/signaling/src/peerconnection/MediaPipelineFactory.h
@@ -0,0 +1,82 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+#ifndef _MEDIAPIPELINEFACTORY_H_
+#define _MEDIAPIPELINEFACTORY_H_
+
+#include "MediaConduitInterface.h"
+#include "PeerConnectionMedia.h"
+#include "transportflow.h"
+
+#include "signaling/src/jsep/JsepTrack.h"
+#include "mozilla/RefPtr.h"
+#include "mozilla/UniquePtr.h"
+
+namespace mozilla {
+
+// Transient helper that builds (or updates) the media pipelines, conduits
+// and transport flows for a negotiated JSEP track pair. Instances are
+// short-lived; they borrow the PeerConnectionMedia/Impl they are built from.
+class MediaPipelineFactory
+{
+public:
+  explicit MediaPipelineFactory(PeerConnectionMedia* aPCMedia)
+      : mPCMedia(aPCMedia), mPC(aPCMedia->GetPC())
+  {
+  }
+
+  // Entry point: wires up (or refreshes) the pipeline for one track pair.
+  nsresult CreateOrUpdateMediaPipeline(const JsepTrackPair& aTrackPair,
+                                       const JsepTrack& aTrack);
+
+private:
+  // Builds the receiving half of a pipeline for aTrack.
+  nsresult CreateMediaPipelineReceiving(
+      const JsepTrackPair& aTrackPair,
+      const JsepTrack& aTrack,
+      size_t level,
+      RefPtr<TransportFlow> aRtpFlow,
+      RefPtr<TransportFlow> aRtcpFlow,
+      nsAutoPtr<MediaPipelineFilter> filter,
+      const RefPtr<MediaSessionConduit>& aConduit);
+
+  // Builds the sending half of a pipeline for aTrack.
+  nsresult CreateMediaPipelineSending(
+      const JsepTrackPair& aTrackPair,
+      const JsepTrack& aTrack,
+      size_t level,
+      RefPtr<TransportFlow> aRtpFlow,
+      RefPtr<TransportFlow> aRtcpFlow,
+      nsAutoPtr<MediaPipelineFilter> filter,
+      const RefPtr<MediaSessionConduit>& aConduit);
+
+  // Level-keyed conduit lookup/creation plus codec configuration.
+  nsresult GetOrCreateAudioConduit(const JsepTrackPair& aTrackPair,
+                                   const JsepTrack& aTrack,
+                                   RefPtr<MediaSessionConduit>* aConduitp);
+
+  nsresult GetOrCreateVideoConduit(const JsepTrackPair& aTrackPair,
+                                   const JsepTrack& aTrack,
+                                   RefPtr<MediaSessionConduit>* aConduitp);
+
+  // Registers external (hardware/GMP/OMX) encoders or decoders when needed.
+  MediaConduitErrorCode EnsureExternalCodec(VideoSessionConduit& aConduit,
+                                            VideoCodecConfig* aConfig,
+                                            bool aIsSend);
+
+  nsresult CreateOrGetTransportFlow(size_t aLevel, bool aIsRtcp,
+                                    const JsepTransport& transport,
+                                    RefPtr<TransportFlow>* out);
+
+  // Resolves the level, RTP/RTCP flows and packet filter for a track pair.
+  nsresult GetTransportParameters(const JsepTrackPair& aTrackPair,
+                                  const JsepTrack& aTrack,
+                                  size_t* aLevelOut,
+                                  RefPtr<TransportFlow>* aRtpOut,
+                                  RefPtr<TransportFlow>* aRtcpOut,
+                                  nsAutoPtr<MediaPipelineFilter>* aFilterOut);
+
+  // Selects realtime vs. screensharing mode from the track's media source.
+  nsresult ConfigureVideoCodecMode(const JsepTrack& aTrack,
+                                   VideoSessionConduit& aConduit);
+
+private:
+  // Not owned, and assumed to exist as long as the factory.
+  // The factory is a transient object, so this is fairly easy.
+  PeerConnectionMedia* mPCMedia;
+  PeerConnectionImpl* mPC;
+};
+
+} // namespace mozilla
+
+#endif
diff --git a/media/webrtc/signaling/src/peerconnection/MediaStreamList.cpp b/media/webrtc/signaling/src/peerconnection/MediaStreamList.cpp
new file mode 100644
index 000000000..f10b4447f
--- /dev/null
+++ b/media/webrtc/signaling/src/peerconnection/MediaStreamList.cpp
@@ -0,0 +1,104 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "CSFLog.h"
+#include "base/basictypes.h"
+#include "MediaStreamList.h"
+#ifdef MOZILLA_INTERNAL_API
+#include "mozilla/dom/MediaStreamListBinding.h"
+#endif
+#include "nsIScriptGlobalObject.h"
+#include "PeerConnectionImpl.h"
+#include "PeerConnectionMedia.h"
+
+namespace mozilla {
+namespace dom {
+
+// Snapshot-style view over a PeerConnection's local or remote streams;
+// holds a strong reference to the owning PeerConnectionImpl.
+MediaStreamList::MediaStreamList(PeerConnectionImpl* peerConnection,
+                                 StreamType type)
+  : mPeerConnection(peerConnection),
+    mType(type)
+{
+}
+
+MediaStreamList::~MediaStreamList()
+{
+}
+
+// XPCOM cycle-collection boilerplate. Internal builds use the compact
+// wrappercache macro; external builds spell out empty unlink/traverse/trace
+// implementations since nothing here participates in cycles directly.
+#ifdef MOZILLA_INTERNAL_API
+NS_IMPL_CYCLE_COLLECTION_WRAPPERCACHE_0(MediaStreamList)
+#else
+NS_IMPL_CYCLE_COLLECTION_CLASS(MediaStreamList)
+NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(MediaStreamList)
+NS_IMPL_CYCLE_COLLECTION_UNLINK_END
+NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN(MediaStreamList)
+NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END
+NS_IMPL_CYCLE_COLLECTION_TRACE_BEGIN(MediaStreamList)
+NS_IMPL_CYCLE_COLLECTION_TRACE_END
+#endif
+
+NS_IMPL_CYCLE_COLLECTING_ADDREF(MediaStreamList)
+NS_IMPL_CYCLE_COLLECTING_RELEASE(MediaStreamList)
+NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(MediaStreamList)
+  NS_WRAPPERCACHE_INTERFACE_MAP_ENTRY
+  NS_INTERFACE_MAP_ENTRY(nsISupports)
+NS_INTERFACE_MAP_END
+
+// Creates the JS reflection via WebIDL bindings; external-linkage builds
+// have no bindings, so they get no wrapper.
+JSObject*
+MediaStreamList::WrapObject(JSContext* cx, JS::Handle<JSObject*> aGivenProto)
+{
+#ifdef MOZILLA_INTERNAL_API
+  return MediaStreamListBinding::Wrap(cx, this, aGivenProto);
+#else
+  return nullptr;
+#endif
+}
+
+// Parent for the wrapper cache: the PeerConnection's window.
+nsISupports*
+MediaStreamList::GetParentObject()
+{
+  return mPeerConnection->GetWindow();
+}
+
+// Helper: reports through |aFound| whether |aInfo| exists and, if so,
+// hands back the DOM stream it wraps; otherwise returns null.
+template<class T>
+static DOMMediaStream*
+GetStreamFromInfo(T* aInfo, bool& aFound)
+{
+  aFound = (aInfo != nullptr);
+  if (aFound) {
+    return aInfo->GetMediaStream();
+  }
+  return nullptr;
+}
+
+// WebIDL indexed getter: returns the index-th local or remote stream,
+// setting |found| accordingly. A null media() means the PeerConnection
+// has already been closed.
+DOMMediaStream*
+MediaStreamList::IndexedGetter(uint32_t index, bool& found)
+{
+  auto media = mPeerConnection->media();
+  if (!media) {
+    found = false;
+    return nullptr;
+  }
+
+  if (mType == Local) {
+    return GetStreamFromInfo(media->GetLocalStreamByIndex(index), found);
+  }
+  return GetStreamFromInfo(media->GetRemoteStreamByIndex(index), found);
+}
+
+// WebIDL length: number of local or remote streams, or 0 once the
+// PeerConnection has been closed.
+uint32_t
+MediaStreamList::Length()
+{
+  auto media = mPeerConnection->media();
+  if (!media) {
+    return 0;
+  }
+  if (mType == Local) {
+    return media->LocalStreamsLength();
+  }
+  return media->RemoteStreamsLength();
+}
+
+} // namespace dom
+} // namespace mozilla
diff --git a/media/webrtc/signaling/src/peerconnection/MediaStreamList.h b/media/webrtc/signaling/src/peerconnection/MediaStreamList.h
new file mode 100644
index 000000000..de9040227
--- /dev/null
+++ b/media/webrtc/signaling/src/peerconnection/MediaStreamList.h
@@ -0,0 +1,54 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef MediaStreamList_h__
+#define MediaStreamList_h__
+
+#include "mozilla/ErrorResult.h"
+#include "nsISupportsImpl.h"
+#include "nsAutoPtr.h"
+#include "nsWrapperCache.h"
+
+#ifdef USE_FAKE_MEDIA_STREAMS
+#include "FakeMediaStreams.h"
+#else
+#include "DOMMediaStream.h"
+#endif
+
+namespace mozilla {
+class PeerConnectionImpl;
+namespace dom {
+
+// WebIDL-backed, array-like view over a PeerConnection's local or remote
+// DOM media streams. Cycle-collected and wrapper-cached; the JS bindings
+// drive it through IndexedGetter/Length.
+class MediaStreamList : public nsISupports,
+                        public nsWrapperCache
+{
+public:
+  // Which side of the connection this list reflects.
+  enum StreamType {
+    Local,
+    Remote
+  };
+
+  MediaStreamList(PeerConnectionImpl* peerConnection, StreamType type);
+
+  NS_DECL_CYCLE_COLLECTING_ISUPPORTS
+  NS_DECL_CYCLE_COLLECTION_SCRIPT_HOLDER_CLASS(MediaStreamList)
+
+  virtual JSObject* WrapObject(JSContext *cx, JS::Handle<JSObject*> aGivenProto)
+    override;
+  nsISupports* GetParentObject();
+
+  // Indexed access for the bindings; |found| is false past the end.
+  DOMMediaStream* IndexedGetter(uint32_t index, bool& found);
+  uint32_t Length();
+
+private:
+  virtual ~MediaStreamList();
+
+  // Keeps the PC alive while JS holds this list.
+  RefPtr<PeerConnectionImpl> mPeerConnection;
+  StreamType mType;
+};
+
+} // namespace dom
+} // namespace mozilla
+
+#endif // MediaStreamList_h__
diff --git a/media/webrtc/signaling/src/peerconnection/PeerConnectionCtx.cpp b/media/webrtc/signaling/src/peerconnection/PeerConnectionCtx.cpp
new file mode 100644
index 000000000..515258efb
--- /dev/null
+++ b/media/webrtc/signaling/src/peerconnection/PeerConnectionCtx.cpp
@@ -0,0 +1,452 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "CSFLog.h"
+
+#include "PeerConnectionImpl.h"
+#include "PeerConnectionCtx.h"
+#include "runnable_utils.h"
+#include "prcvar.h"
+
+#include "mozilla/Telemetry.h"
+#include "browser_logging/WebRtcLog.h"
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+#include "mozilla/dom/RTCPeerConnectionBinding.h"
+#include "mozilla/Preferences.h"
+#include <mozilla/Types.h>
+#endif
+
+#include "nsNetCID.h" // NS_SOCKETTRANSPORTSERVICE_CONTRACTID
+#include "nsServiceManagerUtils.h" // do_GetService
+#include "nsIObserverService.h"
+#include "nsIObserver.h"
+#include "mozilla/Services.h"
+#include "mozilla/StaticPtr.h"
+
+#include "gmp-video-decode.h" // GMP_API_VIDEO_DECODER
+#include "gmp-video-encode.h" // GMP_API_VIDEO_ENCODER
+
+static const char* logTag = "PeerConnectionCtx";
+
+namespace mozilla {
+
+using namespace dom;
+
+// XPCOM shutdown observer: destroys the PeerConnectionCtx singleton when
+// xpcom-shutdown fires, then unregisters and releases itself.
+class PeerConnectionCtxShutdown : public nsIObserver
+{
+public:
+  NS_DECL_ISUPPORTS
+
+  PeerConnectionCtxShutdown() {}
+
+  // Registers for the xpcom-shutdown notification (internal builds only).
+  void Init()
+    {
+      nsCOMPtr<nsIObserverService> observerService =
+        services::GetObserverService();
+      if (!observerService)
+        return;
+
+      nsresult rv = NS_OK;
+
+#ifdef MOZILLA_INTERNAL_API
+      rv = observerService->AddObserver(this,
+                                        NS_XPCOM_SHUTDOWN_OBSERVER_ID,
+                                        false);
+      MOZ_ALWAYS_SUCCEEDS(rv);
+#endif
+      (void) rv;
+    }
+
+  NS_IMETHOD Observe(nsISupports* aSubject, const char* aTopic,
+                     const char16_t* aData) override {
+    if (strcmp(aTopic, NS_XPCOM_SHUTDOWN_OBSERVER_ID) == 0) {
+      CSFLogDebug(logTag, "Shutting down PeerConnectionCtx");
+      PeerConnectionCtx::Destroy();
+
+      nsCOMPtr<nsIObserverService> observerService =
+        services::GetObserverService();
+      if (!observerService)
+        return NS_ERROR_FAILURE;
+
+      nsresult rv = observerService->RemoveObserver(this,
+                                                    NS_XPCOM_SHUTDOWN_OBSERVER_ID);
+      MOZ_ALWAYS_SUCCEEDS(rv);
+
+      // Make sure we're not deleted while still inside ::Observe()
+      RefPtr<PeerConnectionCtxShutdown> kungFuDeathGrip(this);
+      PeerConnectionCtx::gPeerConnectionCtxShutdown = nullptr;
+    }
+    return NS_OK;
+  }
+
+private:
+  virtual ~PeerConnectionCtxShutdown()
+    {
+      nsCOMPtr<nsIObserverService> observerService =
+        services::GetObserverService();
+      if (observerService)
+        observerService->RemoveObserver(this, NS_XPCOM_SHUTDOWN_OBSERVER_ID);
+    }
+};
+
+NS_IMPL_ISUPPORTS(PeerConnectionCtxShutdown, nsIObserver);
+} // namespace mozilla
+
+namespace mozilla {
+
+// Definitions of the singleton's static storage (declared in the header).
+PeerConnectionCtx* PeerConnectionCtx::gInstance;
+nsIThread* PeerConnectionCtx::gMainThread;
+StaticRefPtr<PeerConnectionCtxShutdown> PeerConnectionCtx::gPeerConnectionCtxShutdown;
+
+// Read-only access to the live PeerConnection table (keyed by handle).
+const std::map<const std::string, PeerConnectionImpl *>&
+PeerConnectionCtx::mGetPeerConnections()
+{
+  return mPeerConnections;
+}
+
+// Creates the singleton (idempotent), records the main thread, installs the
+// shutdown observer, and enables WebRTC logging. Main-thread only.
+nsresult PeerConnectionCtx::InitializeGlobal(nsIThread *mainThread,
+  nsIEventTarget* stsThread) {
+  if (!gMainThread) {
+    gMainThread = mainThread;
+  } else {
+    // All callers must agree on which thread is "main".
+    MOZ_ASSERT(gMainThread == mainThread);
+  }
+
+  nsresult res;
+
+  MOZ_ASSERT(NS_IsMainThread());
+
+  if (!gInstance) {
+    CSFLogDebug(logTag, "Creating PeerConnectionCtx");
+    PeerConnectionCtx *ctx = new PeerConnectionCtx();
+
+    res = ctx->Initialize();
+    PR_ASSERT(NS_SUCCEEDED(res));
+    if (!NS_SUCCEEDED(res))
+      return res;
+
+    gInstance = ctx;
+
+    if (!PeerConnectionCtx::gPeerConnectionCtxShutdown) {
+      PeerConnectionCtx::gPeerConnectionCtxShutdown = new PeerConnectionCtxShutdown();
+      PeerConnectionCtx::gPeerConnectionCtxShutdown->Init();
+    }
+  }
+
+  EnableWebRtcLog();
+  return NS_OK;
+}
+
+// Returns the singleton; must only be called after InitializeGlobal.
+PeerConnectionCtx* PeerConnectionCtx::GetInstance() {
+  MOZ_ASSERT(gInstance);
+  return gInstance;
+}
+
+// True while the singleton exists (between InitializeGlobal and Destroy).
+bool PeerConnectionCtx::isActive() {
+  return gInstance;
+}
+
+// Tears down the singleton and stops WebRTC logging; safe to call twice.
+void PeerConnectionCtx::Destroy() {
+  CSFLogDebug(logTag, "%s", __FUNCTION__);
+
+  if (gInstance) {
+    gInstance->Cleanup();
+    delete gInstance;
+    gInstance = nullptr;
+  }
+
+  StopWebRtcLog();
+}
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+typedef Vector<nsAutoPtr<RTCStatsQuery>> RTCStatsQueries;
+
+// Telemetry reporting every second after start of first call.
+// The threading model around the media pipelines is weird:
+// - The pipelines are containers,
+// - containers that are only safe on main thread, with members only safe on STS,
+// - hence the there and back again approach.
+
+// Linear search by stats id; returns the index, or NoIndex when absent.
+static auto
+FindId(const Sequence<RTCInboundRTPStreamStats>& aArray,
+       const nsString &aId) -> decltype(aArray.Length()) {
+  for (decltype(aArray.Length()) i = 0; i < aArray.Length(); i++) {
+    if (aArray[i].mId.Value() == aId) {
+      return i;
+    }
+  }
+  return aArray.NoIndex;
+}
+
+// Overload: linear search of whole reports by PeerConnection id.
+static auto
+FindId(const nsTArray<nsAutoPtr<RTCStatsReportInternal>>& aArray,
+       const nsString &aId) -> decltype(aArray.Length()) {
+  for (decltype(aArray.Length()) i = 0; i < aArray.Length(); i++) {
+    if (aArray[i]->mPcid == aId) {
+      return i;
+    }
+  }
+  return aArray.NoIndex;
+}
+
+// Destination of the main-thread bounce: the nsAutoPtr argument frees the
+// query list here, on the only thread where that is safe.
+static void
+FreeOnMain_m(nsAutoPtr<RTCStatsQueries> aQueryList) {
+  MOZ_ASSERT(NS_IsMainThread());
+}
+
+// STS-thread half of the per-second telemetry pass: executes the queued
+// stats queries, accumulates packet-loss, jitter, RTT and bandwidth
+// histograms from the inbound RTP stats, then keeps the fresh reports for
+// next second's delta computations and bounces the container back to main
+// for destruction.
+static void
+EverySecondTelemetryCallback_s(nsAutoPtr<RTCStatsQueries> aQueryList) {
+  using namespace Telemetry;
+
+  // The context may have been destroyed while this runnable was in flight.
+  if(!PeerConnectionCtx::isActive()) {
+    return;
+  }
+  PeerConnectionCtx *ctx = PeerConnectionCtx::GetInstance();
+
+  for (auto q = aQueryList->begin(); q != aQueryList->end(); ++q) {
+    PeerConnectionImpl::ExecuteStatsQuery_s(*q);
+    auto& r = *(*q)->report;
+    if (r.mInboundRTPStreamStats.WasPassed()) {
+      // First, get reports from a second ago, if any, for calculations below
+      const Sequence<RTCInboundRTPStreamStats> *lastInboundStats = nullptr;
+      {
+        auto i = FindId(ctx->mLastReports, r.mPcid);
+        if (i != ctx->mLastReports.NoIndex) {
+          lastInboundStats = &ctx->mLastReports[i]->mInboundRTPStreamStats.Value();
+        }
+      }
+      // Then, look for the things we want telemetry on
+      auto& array = r.mInboundRTPStreamStats.Value();
+      for (decltype(array.Length()) i = 0; i < array.Length(); i++) {
+        auto& s = array[i];
+        // Heuristic: stream ids containing "audio" are audio streams.
+        bool isAudio = (s.mId.Value().Find("audio") != -1);
+        if (s.mPacketsLost.WasPassed() && s.mPacketsReceived.WasPassed() &&
+            (s.mPacketsLost.Value() + s.mPacketsReceived.Value()) != 0) {
+          ID id;
+          // mIsRemote distinguishes our outbound (remote-reported) stats
+          // from our own inbound measurements.
+          if (s.mIsRemote) {
+            id = isAudio ? WEBRTC_AUDIO_QUALITY_OUTBOUND_PACKETLOSS_RATE :
+                           WEBRTC_VIDEO_QUALITY_OUTBOUND_PACKETLOSS_RATE;
+          } else {
+            id = isAudio ? WEBRTC_AUDIO_QUALITY_INBOUND_PACKETLOSS_RATE :
+                           WEBRTC_VIDEO_QUALITY_INBOUND_PACKETLOSS_RATE;
+          }
+          // *1000 so we can read in 10's of a percent (permille)
+          Accumulate(id,
+                     (s.mPacketsLost.Value() * 1000) /
+                     (s.mPacketsLost.Value() + s.mPacketsReceived.Value()));
+        }
+        if (s.mJitter.WasPassed()) {
+          ID id;
+          if (s.mIsRemote) {
+            id = isAudio ? WEBRTC_AUDIO_QUALITY_OUTBOUND_JITTER :
+                           WEBRTC_VIDEO_QUALITY_OUTBOUND_JITTER;
+          } else {
+            id = isAudio ? WEBRTC_AUDIO_QUALITY_INBOUND_JITTER :
+                           WEBRTC_VIDEO_QUALITY_INBOUND_JITTER;
+          }
+          Accumulate(id, s.mJitter.Value());
+        }
+        if (s.mMozRtt.WasPassed()) {
+          // RTT is only meaningful on remote (RTCP-derived) stats.
+          MOZ_ASSERT(s.mIsRemote);
+          ID id = isAudio ? WEBRTC_AUDIO_QUALITY_OUTBOUND_RTT :
+                            WEBRTC_VIDEO_QUALITY_OUTBOUND_RTT;
+          Accumulate(id, s.mMozRtt.Value());
+        }
+        // Bandwidth needs a delta against last second's byte counter.
+        if (lastInboundStats && s.mBytesReceived.WasPassed()) {
+          auto& laststats = *lastInboundStats;
+          auto i = FindId(laststats, s.mId.Value());
+          if (i != laststats.NoIndex) {
+            auto& lasts = laststats[i];
+            if (lasts.mBytesReceived.WasPassed()) {
+              auto delta_ms = int32_t(s.mTimestamp.Value() -
+                                      lasts.mTimestamp.Value());
+              // In theory we're called every second, so delta *should* be in that range.
+              // Small deltas could cause errors due to division
+              if (delta_ms > 500 && delta_ms < 60000) {
+                ID id;
+                if (s.mIsRemote) {
+                  id = isAudio ? WEBRTC_AUDIO_QUALITY_OUTBOUND_BANDWIDTH_KBITS :
+                                 WEBRTC_VIDEO_QUALITY_OUTBOUND_BANDWIDTH_KBITS;
+                } else {
+                  id = isAudio ? WEBRTC_AUDIO_QUALITY_INBOUND_BANDWIDTH_KBITS :
+                                 WEBRTC_VIDEO_QUALITY_INBOUND_BANDWIDTH_KBITS;
+                }
+                Accumulate(id, ((s.mBytesReceived.Value() -
+                                 lasts.mBytesReceived.Value()) * 8) / delta_ms);
+              }
+              // We could accumulate values until enough time has passed
+              // and then Accumulate() but this isn't that important.
+            }
+          }
+        }
+      }
+    }
+  }
+  // Steal and hang on to reports for the next second
+  ctx->mLastReports.Clear();
+  for (auto q = aQueryList->begin(); q != aQueryList->end(); ++q) {
+    ctx->mLastReports.AppendElement((*q)->report.forget()); // steal avoids copy
+  }
+  // Container must be freed back on main thread
+  NS_DispatchToMainThread(WrapRunnableNM(&FreeOnMain_m, aQueryList),
+                          NS_DISPATCH_NORMAL);
+}
+
+// Main-thread half of the per-second telemetry pass (nsITimer callback):
+// builds a stats query per live PeerConnection with media, then ships the
+// batch to the STS thread where the queries are executed and accumulated.
+void
+PeerConnectionCtx::EverySecondTelemetryCallback_m(nsITimer* timer, void *closure) {
+  MOZ_ASSERT(NS_IsMainThread());
+  MOZ_ASSERT(PeerConnectionCtx::isActive());
+  auto ctx = static_cast<PeerConnectionCtx*>(closure);
+  if (ctx->mPeerConnections.empty()) {
+    return;
+  }
+  nsresult rv;
+  nsCOMPtr<nsIEventTarget> stsThread =
+    do_GetService(NS_SOCKETTRANSPORTSERVICE_CONTRACTID, &rv);
+  if (NS_FAILED(rv)) {
+    return;
+  }
+  MOZ_ASSERT(stsThread);
+
+  nsAutoPtr<RTCStatsQueries> queries(new RTCStatsQueries);
+  for (auto p = ctx->mPeerConnections.begin();
+    p != ctx->mPeerConnections.end(); ++p) {
+    if (p->second->HasMedia()) {
+      if (!queries->append(nsAutoPtr<RTCStatsQuery>(new RTCStatsQuery(true)))) {
+        return;
+      }
+      // Drop queries that could not be built rather than shipping a
+      // half-initialized entry to the STS thread.
+      if (NS_WARN_IF(NS_FAILED(p->second->BuildStatsQuery_m(nullptr, // all tracks
+                                                            queries->back())))) {
+        queries->popBack();
+      } else {
+        MOZ_ASSERT(queries->back()->report);
+      }
+    }
+  }
+  if (!queries->empty()) {
+    rv = RUN_ON_THREAD(stsThread,
+                       WrapRunnableNM(&EverySecondTelemetryCallback_s, queries),
+                       NS_DISPATCH_NORMAL);
+    NS_ENSURE_SUCCESS_VOID(rv);
+  }
+}
+#endif
+
+// One-time instance setup: kicks off GMP discovery and (internal builds)
+// starts the repeating one-second telemetry timer on the main thread.
+nsresult PeerConnectionCtx::Initialize() {
+  initGMP();
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+  mTelemetryTimer = do_CreateInstance(NS_TIMER_CONTRACTID);
+  MOZ_ASSERT(mTelemetryTimer);
+  nsresult rv = mTelemetryTimer->SetTarget(gMainThread);
+  NS_ENSURE_SUCCESS(rv, rv);
+  mTelemetryTimer->InitWithFuncCallback(EverySecondTelemetryCallback_m, this, 1000,
+                                        nsITimer::TYPE_REPEATING_PRECISE_CAN_SKIP);
+
+  if (XRE_IsContentProcess()) {
+    WebrtcGlobalChild::Create();
+  }
+#endif // !defined(MOZILLA_EXTERNAL_LINKAGE)
+
+  return NS_OK;
+}
+
+// Main-thread notification that the GMP service finished its dir scans.
+static void GMPReady_m() {
+  if (PeerConnectionCtx::isActive()) {
+    PeerConnectionCtx::GetInstance()->onGMPReady();
+  }
+};
+
+// Runs on the GMP thread; hops to main to deliver the ready signal.
+static void GMPReady() {
+  PeerConnectionCtx::gMainThread->Dispatch(WrapRunnableNM(&GMPReady_m),
+                                           NS_DISPATCH_NORMAL);
+};
+
+// Acquires the gecko-media-plugin service and queues a readiness ping on
+// its thread. On failure, mGMPService stays null, which isReady() treats
+// as "GMP not in use".
+void PeerConnectionCtx::initGMP()
+{
+  mGMPService = do_GetService("@mozilla.org/gecko-media-plugin-service;1");
+
+  if (!mGMPService) {
+    CSFLogError(logTag, "%s failed to get the gecko-media-plugin-service",
+                __FUNCTION__);
+    return;
+  }
+
+  nsCOMPtr<nsIThread> thread;
+  nsresult rv = mGMPService->GetThread(getter_AddRefs(thread));
+
+  if (NS_FAILED(rv)) {
+    mGMPService = nullptr;
+    CSFLogError(logTag,
+                "%s failed to get the gecko-media-plugin thread, err=%u",
+                __FUNCTION__,
+                static_cast<unsigned>(rv));
+    return;
+  }
+
+  // presumes that all GMP dir scans have been queued for the GMPThread
+  thread->Dispatch(WrapRunnableNM(&GMPReady), NS_DISPATCH_NORMAL);
+}
+
+// Drops queued JSEP work and the GMP service reference prior to deletion.
+nsresult PeerConnectionCtx::Cleanup() {
+  CSFLogDebug(logTag, "%s", __FUNCTION__);
+
+  mQueuedJSEPOperations.Clear();
+  mGMPService = nullptr;
+  return NS_OK;
+}
+
+PeerConnectionCtx::~PeerConnectionCtx() {
+    // ensure mTelemetryTimer ends on main thread
+  MOZ_ASSERT(NS_IsMainThread());
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+  if (mTelemetryTimer) {
+    mTelemetryTimer->Cancel();
+  }
+#endif
+};
+
+// Defers a JSEP operation until the GMP subsystem signals readiness.
+void PeerConnectionCtx::queueJSEPOperation(nsIRunnable* aOperation) {
+  mQueuedJSEPOperations.AppendElement(aOperation);
+}
+
+// GMP discovery finished: flip the ready flag, then drain every JSEP
+// operation that was queued while we were waiting.
+void PeerConnectionCtx::onGMPReady() {
+  mGMPReady = true;
+  for (auto& operation : mQueuedJSEPOperations) {
+    operation->Run();
+  }
+  mQueuedJSEPOperations.Clear();
+}
+
+// True only when a GMP plugin provides BOTH an H.264 encoder and decoder.
+bool PeerConnectionCtx::gmpHasH264() {
+  if (!mGMPService) {
+    return false;
+  }
+
+  // XXX I'd prefer if this was all known ahead of time...
+
+  nsTArray<nsCString> tags;
+  tags.AppendElement(NS_LITERAL_CSTRING("h264"));
+
+  bool has_gmp;
+  nsresult rv;
+  rv = mGMPService->HasPluginForAPI(NS_LITERAL_CSTRING(GMP_API_VIDEO_ENCODER),
+                                    &tags,
+                                    &has_gmp);
+  if (NS_FAILED(rv) || !has_gmp) {
+    return false;
+  }
+
+  rv = mGMPService->HasPluginForAPI(NS_LITERAL_CSTRING(GMP_API_VIDEO_DECODER),
+                                    &tags,
+                                    &has_gmp);
+  if (NS_FAILED(rv) || !has_gmp) {
+    return false;
+  }
+
+  return true;
+}
+
+} // namespace mozilla
diff --git a/media/webrtc/signaling/src/peerconnection/PeerConnectionCtx.h b/media/webrtc/signaling/src/peerconnection/PeerConnectionCtx.h
new file mode 100644
index 000000000..3f7d6250b
--- /dev/null
+++ b/media/webrtc/signaling/src/peerconnection/PeerConnectionCtx.h
@@ -0,0 +1,109 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef peerconnectionctx_h___h__
+#define peerconnectionctx_h___h__
+
+#include <string>
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+#include "WebrtcGlobalChild.h"
+#endif
+
+#include "mozilla/Attributes.h"
+#include "mozilla/StaticPtr.h"
+#include "PeerConnectionImpl.h"
+#include "mozIGeckoMediaPluginService.h"
+#include "nsIRunnable.h"
+
+namespace mozilla {
+class PeerConnectionCtxShutdown;
+
+namespace dom {
+class WebrtcGlobalInformation;
+}
+
+// A class to hold some of the singleton objects we need:
+// * The global PeerConnectionImpl table and its associated lock.
+// * Stats report objects for PCs that are gone
+// * GMP related state
+// A class to hold some of the singleton objects we need:
+// * The global PeerConnectionImpl table and its associated lock.
+// * Stats report objects for PCs that are gone
+// * GMP related state
+class PeerConnectionCtx {
+ public:
+  // Creates the singleton on first call; later calls must pass the same
+  // main thread. Main-thread only.
+  static nsresult InitializeGlobal(nsIThread *mainThread, nsIEventTarget *stsThread);
+  static PeerConnectionCtx* GetInstance();
+  static bool isActive();
+  static void Destroy();
+
+  bool isReady() {
+    // If mGMPService is not set, we aren't using GMP.
+    if (mGMPService) {
+      return mGMPReady;
+    }
+    return true;
+  }
+
+  // Defers JSEP work until GMP init completes; onGMPReady drains the queue.
+  void queueJSEPOperation(nsIRunnable* aJSEPOperation);
+  void onGMPReady();
+
+  bool gmpHasH264();
+
+  // Make these classes friend so that they can access mPeerconnections.
+  friend class PeerConnectionImpl;
+  friend class PeerConnectionWrapper;
+  friend class mozilla::dom::WebrtcGlobalInformation;
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+  // WebrtcGlobalInformation uses this; we put it here so we don't need to
+  // create another shutdown observer class.
+  mozilla::dom::Sequence<mozilla::dom::RTCStatsReportInternal>
+    mStatsForClosedPeerConnections;
+#endif
+
+  const std::map<const std::string, PeerConnectionImpl *>& mGetPeerConnections();
+ private:
+  // We could make these available only via accessors but it's too much trouble.
+  std::map<const std::string, PeerConnectionImpl *> mPeerConnections;
+
+  PeerConnectionCtx() :  mGMPReady(false) {}
+  // This is a singleton, so don't copy construct it, etc.
+  PeerConnectionCtx(const PeerConnectionCtx& other) = delete;
+  void operator=(const PeerConnectionCtx& other) = delete;
+  virtual ~PeerConnectionCtx();
+
+  nsresult Initialize();
+  nsresult Cleanup();
+
+  void initGMP();
+
+  // nsITimer callback driving the per-second stats/telemetry pass.
+  static void
+  EverySecondTelemetryCallback_m(nsITimer* timer, void *);
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+  nsCOMPtr<nsITimer> mTelemetryTimer;
+
+public:
+  // TODO(jib): If we ever enable move semantics on std::map...
+  //std::map<nsString,nsAutoPtr<mozilla::dom::RTCStatsReportInternal>> mLastReports;
+  nsTArray<nsAutoPtr<mozilla::dom::RTCStatsReportInternal>> mLastReports;
+private:
+#endif
+
+  // We cannot form offers/answers properly until the Gecko Media Plugin stuff
+  // has been initted, which is a complicated mess of thread dispatches,
+  // including sync dispatches to main. So, we need to be able to queue up
+  // offer creation (or SetRemote, when we're the answerer) until all of this is
+  // ready to go, since blocking on this init is just begging for deadlock.
+  nsCOMPtr<mozIGeckoMediaPluginService> mGMPService;
+  bool mGMPReady;
+  nsTArray<nsCOMPtr<nsIRunnable>> mQueuedJSEPOperations;
+
+  static PeerConnectionCtx *gInstance;
+public:
+  static nsIThread *gMainThread;
+  static mozilla::StaticRefPtr<mozilla::PeerConnectionCtxShutdown> gPeerConnectionCtxShutdown;
+};
+
+} // namespace mozilla
+
+#endif
diff --git a/media/webrtc/signaling/src/peerconnection/PeerConnectionImpl.cpp b/media/webrtc/signaling/src/peerconnection/PeerConnectionImpl.cpp
new file mode 100644
index 000000000..33422ed7a
--- /dev/null
+++ b/media/webrtc/signaling/src/peerconnection/PeerConnectionImpl.cpp
@@ -0,0 +1,4176 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <cstdlib>
+#include <cerrno>
+#include <deque>
+#include <set>
+#include <sstream>
+#include <vector>
+
+#include "CSFLog.h"
+#include "timecard.h"
+
+#include "jsapi.h"
+#include "nspr.h"
+#include "nss.h"
+#include "pk11pub.h"
+
+#include "nsNetCID.h"
+#include "nsIProperty.h"
+#include "nsIPropertyBag2.h"
+#include "nsIServiceManager.h"
+#include "nsISimpleEnumerator.h"
+#include "nsServiceManagerUtils.h"
+#include "nsISocketTransportService.h"
+#include "nsIConsoleService.h"
+#include "nsThreadUtils.h"
+#include "nsIPrefService.h"
+#include "nsIPrefBranch.h"
+#include "nsProxyRelease.h"
+#include "nsQueryObject.h"
+#include "prtime.h"
+
+#include "AudioConduit.h"
+#include "VideoConduit.h"
+#include "runnable_utils.h"
+#include "PeerConnectionCtx.h"
+#include "PeerConnectionImpl.h"
+#include "PeerConnectionMedia.h"
+#include "nsDOMDataChannelDeclarations.h"
+#include "dtlsidentity.h"
+#include "signaling/src/sdp/SdpAttribute.h"
+
+#include "signaling/src/jsep/JsepTrack.h"
+#include "signaling/src/jsep/JsepSession.h"
+#include "signaling/src/jsep/JsepSessionImpl.h"
+
+#include "mozilla/IntegerPrintfMacros.h"
+#include "mozilla/Sprintf.h"
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+#ifdef XP_WIN
+// We need to undef the MS macro for nsIDocument::CreateEvent
+#ifdef CreateEvent
+#undef CreateEvent
+#endif
+#endif // XP_WIN
+
+#include "nsIDocument.h"
+#include "nsGlobalWindow.h"
+#include "nsDOMDataChannel.h"
+#include "mozilla/dom/Performance.h"
+#include "mozilla/TimeStamp.h"
+#include "mozilla/Telemetry.h"
+#include "mozilla/Preferences.h"
+#include "mozilla/PublicSSL.h"
+#include "nsXULAppAPI.h"
+#include "nsContentUtils.h"
+#include "nsDOMJSUtils.h"
+#include "nsIScriptError.h"
+#include "nsPrintfCString.h"
+#include "nsURLHelper.h"
+#include "nsNetUtil.h"
+#include "nsIURLParser.h"
+#include "nsIDOMDataChannel.h"
+#include "nsIDOMLocation.h"
+#include "nsNullPrincipal.h"
+#include "mozilla/PeerIdentity.h"
+#include "mozilla/dom/RTCCertificate.h"
+#include "mozilla/dom/RTCConfigurationBinding.h"
+#include "mozilla/dom/RTCDTMFSenderBinding.h"
+#include "mozilla/dom/RTCDTMFToneChangeEvent.h"
+#include "mozilla/dom/RTCRtpSenderBinding.h"
+#include "mozilla/dom/RTCStatsReportBinding.h"
+#include "mozilla/dom/RTCPeerConnectionBinding.h"
+#include "mozilla/dom/PeerConnectionImplBinding.h"
+#include "mozilla/dom/DataChannelBinding.h"
+#include "mozilla/dom/PerformanceTiming.h"
+#include "mozilla/dom/PluginCrashedEvent.h"
+#include "MediaStreamList.h"
+#include "MediaStreamTrack.h"
+#include "AudioStreamTrack.h"
+#include "VideoStreamTrack.h"
+#include "nsIScriptGlobalObject.h"
+#include "MediaStreamGraph.h"
+#include "DOMMediaStream.h"
+#include "rlogconnector.h"
+#include "WebrtcGlobalInformation.h"
+#include "mozilla/dom/Event.h"
+#include "nsIDOMCustomEvent.h"
+#include "mozilla/EventDispatcher.h"
+#include "mozilla/net/DataChannelProtocol.h"
+#endif
+
+#ifndef USE_FAKE_MEDIA_STREAMS
+#include "MediaStreamGraphImpl.h"
+#endif
+
+#ifdef XP_WIN
+// We need to undef the MS macro again in case the windows include file
+// got imported after we included nsIDocument.h
+#ifdef CreateEvent
+#undef CreateEvent
+#endif
+#endif // XP_WIN
+
+#ifndef USE_FAKE_MEDIA_STREAMS
+#include "MediaSegment.h"
+#endif
+
+#ifdef USE_FAKE_PCOBSERVER
+#include "FakePCObserver.h"
+#else
+#include "mozilla/dom/PeerConnectionObserverBinding.h"
+#endif
+#include "mozilla/dom/PeerConnectionObserverEnumsBinding.h"
+
+#ifdef MOZ_WEBRTC_OMX
+#include "OMXVideoCodec.h"
+#include "OMXCodecWrapper.h"
+#endif
+
+#define ICE_PARSING "In RTCConfiguration passed to RTCPeerConnection constructor"
+
+using namespace mozilla;
+using namespace mozilla::dom;
+
+typedef PCObserverString ObString;
+
+static const char* logTag = "PeerConnectionImpl";
+
+// Getting exceptions back down from PCObserver is generally not harmful.
+namespace {
+// This is a terrible hack. The problem is that SuppressException is not
+// inline, and we link this file without libxul in some cases (e.g. for our test
+// setup). So we can't use ErrorResult or IgnoredErrorResult because those call
+// SuppressException... And we can't use FastErrorResult because we can't
+// include BindingUtils.h, because our linking setup is broken. Use
+// BaseErrorResult directly. Please do not let me see _anyone_ doing this
+// without really careful review from someone who knows what they are doing.
+// Error-result type whose destructor quietly suppresses any pending
+// exception, so JS callbacks cannot leave dangling exception state behind.
+// Built on TErrorResult directly (see comment above) because ErrorResult's
+// non-inline SuppressException is unavailable in non-libxul test links.
+class JSErrorResult :
+    public binding_danger::TErrorResult<binding_danger::JustAssertCleanupPolicy>
+{
+public:
+  ~JSErrorResult()
+  {
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+    // Only linked in full-libxul builds; in external-linkage builds the
+    // JustAssertCleanupPolicy destructor behavior applies as-is.
+    SuppressException();
+#endif
+  }
+};
+
+// The WrapRunnable() macros copy passed-in args and passes them to the function
+// later on the other thread. ErrorResult cannot be passed like this because it
+// disallows copy-semantics.
+//
+// This WrappableJSErrorResult hack solves this by not actually copying the
+// ErrorResult, but creating a new one instead, which works because we don't
+// care about the result.
+//
+// Since this is for JS-calls, these can only be dispatched to the main thread.
+
+// Copyable carrier for a JSErrorResult, so it can pass through the
+// WrapRunnable() macros (which copy their arguments across threads).
+// A "copy" does not duplicate error state — it creates a fresh
+// JSErrorResult, which is fine because the result is never inspected.
+class WrappableJSErrorResult {
+public:
+  WrappableJSErrorResult()
+    : mRv(MakeUnique<JSErrorResult>()),
+      isCopy(false) {}
+  // |other| is deliberately ignored: copying allocates a brand-new result.
+  WrappableJSErrorResult(const WrappableJSErrorResult &other)
+    : mRv(MakeUnique<JSErrorResult>()),
+      isCopy(true) {}
+  ~WrappableJSErrorResult() {
+    if (isCopy) {
+      // Copies are made for JS-call dispatch, so they must die on main.
+      MOZ_ASSERT(NS_IsMainThread());
+    }
+  }
+  operator JSErrorResult &() { return *mRv; }
+  operator ErrorResult &() { return *mRv; }
+private:
+  mozilla::UniquePtr<JSErrorResult> mRv;
+  bool isCopy;
+};
+}
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+// One-time, main-thread-only NSS bring-up for content processes. The chrome
+// (parent) process initializes NSS via PSM instead — see Initialize().
+// Returns NS_OK on success or if NSS is already started here.
+static nsresult InitNSSInContent()
+{
+  NS_ENSURE_TRUE(NS_IsMainThread(), NS_ERROR_NOT_SAME_THREAD);
+
+  if (!XRE_IsContentProcess()) {
+    MOZ_ASSERT_UNREACHABLE("Must be called in content process");
+    return NS_ERROR_FAILURE;
+  }
+
+  // Idempotent: only initialize once per process.
+  static bool nssStarted = false;
+  if (nssStarted) {
+    return NS_OK;
+  }
+
+  // No-DB init: content processes have no certificate/key database access.
+  if (NSS_NoDB_Init(nullptr) != SECSuccess) {
+    CSFLogError(logTag, "NSS_NoDB_Init failed.");
+    return NS_ERROR_FAILURE;
+  }
+
+  if (NS_FAILED(mozilla::psm::InitializeCipherSuite())) {
+    CSFLogError(logTag, "Fail to set up nss cipher suite.");
+    return NS_ERROR_FAILURE;
+  }
+
+  mozilla::psm::DisableMD5();
+
+  nssStarted = true;
+
+  return NS_OK;
+}
+#endif // !defined(MOZILLA_EXTERNAL_LINKAGE)
+
+namespace mozilla {
+ class DataChannel;
+}
+
+class nsIDOMDataChannel;
+
+// Returns the (lazily created, process-lifetime) NSPR log module used for
+// all signaling logging.
+PRLogModuleInfo *signalingLogInfo() {
+  static PRLogModuleInfo *sSignalingLog = nullptr;
+  if (!sSignalingLog) {
+    sSignalingLog = PR_NewLogModule("signaling");
+  }
+  return sSignalingLog;
+}
+
+// XXX Workaround for bug 998092 to maintain the existing broken semantics
+// Specialization that hands nsSupportsWeakReference the
+// NS_ISUPPORTSWEAKREFERENCE_IID, deliberately preserving the historical
+// (broken) QI semantics described in bug 998092 above.
+template<>
+struct nsISupportsWeakReference::COMTypeInfo<nsSupportsWeakReference, void> {
+  static const nsIID kIID;
+};
+const nsIID nsISupportsWeakReference::COMTypeInfo<nsSupportsWeakReference, void>::kIID = NS_ISUPPORTSWEAKREFERENCE_IID;
+
+namespace mozilla {
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+// Constructs a stats query; |internal| selects internal stats collection
+// (stored in internalStats). All other flags start false.
+RTCStatsQuery::RTCStatsQuery(bool internal) :
+  failed(false),
+  internalStats(internal),
+  grabAllLevels(false) {
+}
+
+// Queries must be destroyed on main thread (asserted, debug builds only).
+RTCStatsQuery::~RTCStatsQuery() {
+  MOZ_ASSERT(NS_IsMainThread());
+}
+
+#endif
+
+NS_IMPL_ISUPPORTS0(PeerConnectionImpl)
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+// WebIDL wrapper hook: reflects this PeerConnectionImpl into JS via the
+// generated PeerConnectionImplBinding. Returns false on wrapping failure.
+bool
+PeerConnectionImpl::WrapObject(JSContext* aCx,
+                               JS::Handle<JSObject*> aGivenProto,
+                               JS::MutableHandle<JSObject*> aReflector)
+{
+  return PeerConnectionImplBinding::Wrap(aCx, this, aGivenProto, aReflector);
+}
+#endif
+
+// Fills |*idp| with a freshly generated UUID string. Returns false (leaving
+// *idp untouched) if the XPCOM UUID-generator service cannot be obtained or
+// generation fails.
+bool PCUuidGenerator::Generate(std::string* idp) {
+  nsresult rv;
+
+  // Lazily acquire and cache the generator service on first use.
+  if (!mGenerator) {
+    mGenerator = do_GetService("@mozilla.org/uuid-generator;1", &rv);
+    if (NS_FAILED(rv) || !mGenerator) {
+      return false;
+    }
+  }
+
+  nsID id;
+  rv = mGenerator->GenerateUUIDInPlace(&id);
+  if (NS_FAILED(rv)) {
+    return false;
+  }
+
+  char buffer[NSID_LENGTH];
+  id.ToProvidedString(buffer);
+  idp->assign(buffer);
+  return true;
+}
+
+// Returns true iff |aWindow|'s extant document has a load context with
+// private browsing enabled. Always false in external-linkage (test) builds
+// or when the window/document is unavailable.
+bool IsPrivateBrowsing(nsPIDOMWindowInner* aWindow)
+{
+#if defined(MOZILLA_EXTERNAL_LINKAGE)
+  return false;
+#else
+  if (!aWindow) {
+    return false;
+  }
+
+  nsIDocument *doc = aWindow->GetExtantDoc();
+  if (!doc) {
+    return false;
+  }
+
+  nsILoadContext *loadContext = doc->GetLoadContext();
+  return loadContext && loadContext->UsePrivateBrowsing();
+#endif
+}
+
+// Constructor: sets defaults, detects private browsing on the owning window
+// (switching the RLog connector to private mode), and reads ICE loopback /
+// link-local prefs. Heavy initialization happens later in Initialize().
+PeerConnectionImpl::PeerConnectionImpl(const GlobalObject* aGlobal)
+: mTimeCard(MOZ_LOG_TEST(signalingLogInfo(),LogLevel::Error) ?
+    create_timecard() : nullptr)
+  , mSignalingState(PCImplSignalingState::SignalingStable)
+  , mIceConnectionState(PCImplIceConnectionState::New)
+  , mIceGatheringState(PCImplIceGatheringState::New)
+  , mDtlsConnected(false)
+  , mWindow(nullptr)
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+  , mCertificate(nullptr)
+#else
+  , mIdentity(nullptr)
+#endif
+  , mPrivacyRequested(false)
+  , mSTSThread(nullptr)
+  , mAllowIceLoopback(false)
+  , mAllowIceLinkLocal(false)
+  , mMedia(nullptr)
+  , mUuidGen(MakeUnique<PCUuidGenerator>())
+  , mNumAudioStreams(0)
+  , mNumVideoStreams(0)
+  , mHaveConfiguredCodecs(false)
+  , mHaveDataStream(false)
+  , mAddCandidateErrorCount(0)
+  , mTrickle(true) // TODO(ekr@rtfm.com): Use pref
+  , mNegotiationNeeded(false)
+  , mPrivateWindow(false)
+{
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+  MOZ_ASSERT(NS_IsMainThread());
+  auto log = RLogConnector::CreateInstance();
+  if (aGlobal) {
+    mWindow = do_QueryInterface(aGlobal->GetAsSupports());
+    if (IsPrivateBrowsing(mWindow)) {
+      mPrivateWindow = true;
+      log->EnterPrivateMode();
+    }
+  }
+#endif
+  // NOTE(review): mHandle is not assigned until Initialize(), so this logs
+  // an empty handle string.
+  CSFLogInfo(logTag, "%s: PeerConnectionImpl constructor for %s",
+             __FUNCTION__, mHandle.c_str());
+  STAMP_TIMECARD(mTimeCard, "Constructor Completed");
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+  mAllowIceLoopback = Preferences::GetBool(
+    "media.peerconnection.ice.loopback", false);
+  mAllowIceLinkLocal = Preferences::GetBool(
+    "media.peerconnection.ice.link_local", false);
+#endif
+  memset(mMaxReceiving, 0, sizeof(mMaxReceiving));
+  memset(mMaxSending, 0, sizeof(mMaxSending));
+}
+
+// Destructor: dumps and frees the timecard, exits RLog private mode if this
+// PC turned it on, unregisters from the global PeerConnectionCtx table, and
+// closes the connection. Must run on main thread.
+PeerConnectionImpl::~PeerConnectionImpl()
+{
+  if (mTimeCard) {
+    STAMP_TIMECARD(mTimeCard, "Destructor Invoked");
+    print_timecard(mTimeCard);
+    destroy_timecard(mTimeCard);
+    mTimeCard = nullptr;
+  }
+  // This aborts if not on main thread (in Debug builds)
+  PC_AUTO_ENTER_API_CALL_NO_CHECK();
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+  if (mPrivateWindow) {
+    auto * log = RLogConnector::GetInstance();
+    if (log) {
+      log->ExitPrivateMode();
+    }
+    mPrivateWindow = false;
+  }
+#endif
+  // The ctx may already have been torn down at XPCOM shutdown.
+  if (PeerConnectionCtx::isActive()) {
+    PeerConnectionCtx::GetInstance()->mPeerConnections.erase(mHandle);
+  } else {
+    CSFLogError(logTag, "PeerConnectionCtx is already gone. Ignoring...");
+  }
+
+  CSFLogInfo(logTag, "%s: PeerConnectionImpl destructor invoked for %s",
+             __FUNCTION__, mHandle.c_str());
+
+  Close();
+
+  // Since this and Initialize() occur on MainThread, they can't both be
+  // running at once
+
+  // Right now, we delete PeerConnectionCtx at XPCOM shutdown only, but we
+  // probably want to shut it down more aggressively to save memory. We
+  // could shut down here when there are no uses. It might be more optimal
+  // to release off a timer (and XPCOM Shutdown) to avoid churn
+}
+
+// Creates an empty source DOMMediaStream on the audio-driven
+// MediaStreamGraph for this PC's window. Used below when constructing
+// RemoteSourceStreamInfo for remote media.
+already_AddRefed<DOMMediaStream>
+PeerConnectionImpl::MakeMediaStream()
+{
+  MediaStreamGraph* graph =
+    MediaStreamGraph::GetInstance(MediaStreamGraph::AUDIO_THREAD_DRIVER,
+                                  AudioChannel::Normal);
+
+  RefPtr<DOMMediaStream> stream =
+    DOMMediaStream::CreateSourceStreamAsInput(GetWindow(), graph);
+
+  CSFLogDebug(logTag, "Created media stream %p, inner: %p", stream.get(), stream->GetInputStream());
+
+  return stream.forget();
+}
+
+// Builds a RemoteSourceStreamInfo wrapping a fresh local media stream and
+// stores it in |*aInfo|. Returns NS_ERROR_FAILURE if the stream cannot be
+// created. Main-thread only (enforced by PC_AUTO_ENTER_API_CALL_NO_CHECK).
+nsresult
+PeerConnectionImpl::CreateRemoteSourceStreamInfo(RefPtr<RemoteSourceStreamInfo>*
+                                                 aInfo,
+                                                 const std::string& aStreamID)
+{
+  MOZ_ASSERT(aInfo);
+  PC_AUTO_ENTER_API_CALL_NO_CHECK();
+
+  RefPtr<DOMMediaStream> mediaStream = MakeMediaStream();
+  if (!mediaStream) {
+    return NS_ERROR_FAILURE;
+  }
+
+  *aInfo = new RemoteSourceStreamInfo(mediaStream.forget(), mMedia, aStreamID);
+  return NS_OK;
+}
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+/**
+ * In JS, an RTCConfiguration looks like this:
+ *
+ * { "iceServers": [ { url:"stun:stun.example.org" },
+ * { url:"turn:turn.example.org?transport=udp",
+ * username: "jib", credential:"mypass"} ] }
+ *
+ * This function converts that into an internal PeerConnectionConfiguration
+ * object.
+ */
+// Converts a WebIDL RTCConfiguration into this internal configuration:
+// copies each ICE server, then maps bundle and ICE transport policies.
+// Returns the first AddIceServer failure, else NS_OK.
+nsresult
+PeerConnectionConfiguration::Init(const RTCConfiguration& aSrc)
+{
+  if (aSrc.mIceServers.WasPassed()) {
+    for (size_t i = 0; i < aSrc.mIceServers.Value().Length(); i++) {
+      nsresult rv = AddIceServer(aSrc.mIceServers.Value()[i]);
+      NS_ENSURE_SUCCESS(rv, rv);
+    }
+  }
+
+  // MOZ_CRASH on the defaults below: any new WebIDL enum value must be
+  // explicitly handled here.
+  switch (aSrc.mBundlePolicy) {
+    case dom::RTCBundlePolicy::Balanced:
+      setBundlePolicy(kBundleBalanced);
+      break;
+    case dom::RTCBundlePolicy::Max_compat:
+      setBundlePolicy(kBundleMaxCompat);
+      break;
+    case dom::RTCBundlePolicy::Max_bundle:
+      setBundlePolicy(kBundleMaxBundle);
+      break;
+    default:
+      MOZ_CRASH();
+  }
+
+  switch (aSrc.mIceTransportPolicy) {
+    case dom::RTCIceTransportPolicy::Relay:
+      setIceTransportPolicy(NrIceCtx::ICE_POLICY_RELAY);
+      break;
+    case dom::RTCIceTransportPolicy::All:
+      // A pref can further restrict "all" to exclude host candidates.
+      if (Preferences::GetBool("media.peerconnection.ice.no_host", false)) {
+        setIceTransportPolicy(NrIceCtx::ICE_POLICY_NO_HOST);
+      } else {
+        setIceTransportPolicy(NrIceCtx::ICE_POLICY_ALL);
+      }
+      break;
+    default:
+      MOZ_CRASH();
+  }
+  return NS_OK;
+}
+
+// Parses one RTCIceServer's URL list and registers each STUN/TURN server.
+// STUNS/TURNS entries are currently skipped (Bug 1056934). Because nsIURI
+// has no STUN/TURN support, the authority and ?transport= query parameter
+// are parsed by hand. Fails on non-STUN/TURN schemes or a missing host.
+nsresult
+PeerConnectionConfiguration::AddIceServer(const RTCIceServer &aServer)
+{
+  NS_ENSURE_STATE(aServer.mUrls.WasPassed());
+  NS_ENSURE_STATE(aServer.mUrls.Value().IsStringSequence());
+  auto &urls = aServer.mUrls.Value().GetAsStringSequence();
+  for (size_t i = 0; i < urls.Length(); i++) {
+    // Without STUN/TURN handlers, NS_NewURI returns nsSimpleURI rather than
+    // nsStandardURL. To parse STUN/TURN URI's to spec
+    // http://tools.ietf.org/html/draft-nandakumar-rtcweb-stun-uri-02#section-3
+    // http://tools.ietf.org/html/draft-petithuguenin-behave-turn-uri-03#section-3
+    // we parse out the query-string, and use ParseAuthority() on the rest
+    RefPtr<nsIURI> url;
+    nsresult rv = NS_NewURI(getter_AddRefs(url), urls[i]);
+    NS_ENSURE_SUCCESS(rv, rv);
+    bool isStun = false, isStuns = false, isTurn = false, isTurns = false;
+    url->SchemeIs("stun", &isStun);
+    url->SchemeIs("stuns", &isStuns);
+    url->SchemeIs("turn", &isTurn);
+    url->SchemeIs("turns", &isTurns);
+    if (!(isStun || isStuns || isTurn || isTurns)) {
+      return NS_ERROR_FAILURE;
+    }
+    if (isTurns || isStuns) {
+      continue; // TODO: Support TURNS and STUNS (Bug 1056934)
+    }
+    nsAutoCString spec;
+    rv = url->GetSpec(spec);
+    NS_ENSURE_SUCCESS(rv, rv);
+
+    // TODO(jib@mozilla.com): Revisit once nsURI supports STUN/TURN (Bug 833509)
+    int32_t port;
+    nsAutoCString host;
+    nsAutoCString transport;
+    {
+      uint32_t hostPos;
+      int32_t hostLen;
+      nsAutoCString path;
+      rv = url->GetPath(path);
+      NS_ENSURE_SUCCESS(rv, rv);
+
+      // Tolerate query-string + parse 'transport=[udp|tcp]' by hand.
+      int32_t questionmark = path.FindChar('?');
+      if (questionmark >= 0) {
+        const nsCString match = NS_LITERAL_CSTRING("transport=");
+
+        // Walk '&'-separated field=value pairs; the last transport= wins.
+        for (int32_t i = questionmark, endPos; i >= 0; i = endPos) {
+          endPos = path.FindCharInSet("&", i + 1);
+          const nsDependentCSubstring fieldvaluepair = Substring(path, i + 1,
+                                                                endPos);
+          if (StringBeginsWith(fieldvaluepair, match)) {
+            transport = Substring(fieldvaluepair, match.Length());
+            ToLowerCase(transport);
+          }
+        }
+        // Strip the query string before authority parsing.
+        path.SetLength(questionmark);
+      }
+
+      rv = net_GetAuthURLParser()->ParseAuthority(path.get(), path.Length(),
+                                                  nullptr, nullptr,
+                                                  nullptr, nullptr,
+                                                  &hostPos, &hostLen, &port);
+      NS_ENSURE_SUCCESS(rv, rv);
+      if (!hostLen) {
+        return NS_ERROR_FAILURE;
+      }
+      if (hostPos > 1) /* The username was removed */
+        return NS_ERROR_FAILURE;
+      path.Mid(host, hostPos, hostLen);
+    }
+    // Default STUN/TURN ports: 3478 (plain), 5349 (TLS variants).
+    if (port == -1)
+      port = (isStuns || isTurns)? 5349 : 3478;
+
+    if (isTurn || isTurns) {
+      NS_ConvertUTF16toUTF8 credential(aServer.mCredential.Value());
+      NS_ConvertUTF16toUTF8 username(aServer.mUsername.Value());
+
+      if (!addTurnServer(host.get(), port,
+                         username.get(),
+                         credential.get(),
+                         (transport.IsEmpty() ?
+                          kNrIceTransportUdp : transport.get()))) {
+        return NS_ERROR_FAILURE;
+      }
+    } else {
+      if (!addStunServer(host.get(), port, (transport.IsEmpty() ?
+                         kNrIceTransportUdp : transport.get()))) {
+        return NS_ERROR_FAILURE;
+      }
+    }
+  }
+  return NS_OK;
+}
+#endif
+
+// Core initialization (called from both WebIDL and unit-test paths):
+// captures the main/STS threads and observer, initializes NSS, names the PC
+// (timestamp + window id + URL), generates a random handle, brings up the
+// global ctx, PeerConnectionMedia (and its ICE signal slots), and the JSEP
+// session (ICE credentials, bundle policy, and — in external-linkage builds
+// only — a generated DTLS identity/fingerprint).
+nsresult
+PeerConnectionImpl::Initialize(PeerConnectionObserver& aObserver,
+                               nsGlobalWindow* aWindow,
+                               const PeerConnectionConfiguration& aConfiguration,
+                               nsISupports* aThread)
+{
+  nsresult res;
+
+  MOZ_ASSERT(NS_IsMainThread());
+  MOZ_ASSERT(aThread);
+  if (!mThread) {
+    mThread = do_QueryInterface(aThread);
+    MOZ_ASSERT(mThread);
+  }
+  CheckThread();
+
+  // Weak ref: the observer is a JS-implemented object owned elsewhere.
+  mPCObserver = do_GetWeakReference(&aObserver);
+
+  // Find the STS thread
+
+  mSTSThread = do_GetService(NS_SOCKETTRANSPORTSERVICE_CONTRACTID, &res);
+  MOZ_ASSERT(mSTSThread);
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+
+  // Initialize NSS if we are in content process. For chrome process, NSS should already
+  // been initialized.
+  if (XRE_IsParentProcess()) {
+    // This code interferes with the C++ unit test startup code.
+    nsCOMPtr<nsISupports> nssDummy = do_GetService("@mozilla.org/psm;1", &res);
+    NS_ENSURE_SUCCESS(res, res);
+  } else {
+    NS_ENSURE_SUCCESS(res = InitNSSInContent(), res);
+  }
+
+  // Currently no standalone unit tests for DataChannel,
+  // which is the user of mWindow
+  MOZ_ASSERT(aWindow);
+  mWindow = aWindow->AsInner();
+  NS_ENSURE_STATE(mWindow);
+#endif // !MOZILLA_EXTERNAL_LINKAGE
+
+  PRTime timestamp = PR_Now();
+  // Ok if we truncate this.
+  char temp[128];
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+  nsAutoCString locationCStr;
+
+  if (nsCOMPtr<nsIDOMLocation> location = mWindow->GetLocation()) {
+    nsAutoString locationAStr;
+    location->ToString(locationAStr);
+
+    CopyUTF16toUTF8(locationAStr, locationCStr);
+  }
+
+  SprintfLiteral(temp,
+                 "%" PRIu64 " (id=%" PRIu64 " url=%s)",
+                 static_cast<uint64_t>(timestamp),
+                 static_cast<uint64_t>(mWindow ? mWindow->WindowID() : 0),
+                 locationCStr.get() ? locationCStr.get() : "NULL");
+
+#else
+  SprintfLiteral(temp, "%" PRIu64, static_cast<uint64_t>(timestamp));
+#endif // !MOZILLA_EXTERNAL_LINKAGE
+
+  mName = temp;
+
+  // Generate a random handle: 8 NSS-random bytes, hex-encoded (16 chars).
+  unsigned char handle_bin[8];
+  SECStatus rv;
+  rv = PK11_GenerateRandom(handle_bin, sizeof(handle_bin));
+  if (rv != SECSuccess) {
+    // Randomness failure is unrecoverable; crash rather than reuse handles.
+    MOZ_CRASH();
+    return NS_ERROR_UNEXPECTED;
+  }
+
+  char hex[17];
+  SprintfLiteral(hex, "%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x",
+                 handle_bin[0],
+                 handle_bin[1],
+                 handle_bin[2],
+                 handle_bin[3],
+                 handle_bin[4],
+                 handle_bin[5],
+                 handle_bin[6],
+                 handle_bin[7]);
+
+  mHandle = hex;
+
+  STAMP_TIMECARD(mTimeCard, "Initializing PC Ctx");
+  res = PeerConnectionCtx::InitializeGlobal(mThread, mSTSThread);
+  NS_ENSURE_SUCCESS(res, res);
+
+  mMedia = new PeerConnectionMedia(this);
+
+  // Connect ICE slots.
+  mMedia->SignalIceGatheringStateChange.connect(
+      this,
+      &PeerConnectionImpl::IceGatheringStateChange);
+  mMedia->SignalUpdateDefaultCandidate.connect(
+      this,
+      &PeerConnectionImpl::UpdateDefaultCandidate);
+  mMedia->SignalEndOfLocalCandidates.connect(
+      this,
+      &PeerConnectionImpl::EndOfLocalCandidates);
+  mMedia->SignalIceConnectionStateChange.connect(
+      this,
+      &PeerConnectionImpl::IceConnectionStateChange);
+
+  mMedia->SignalCandidate.connect(this, &PeerConnectionImpl::CandidateReady);
+
+  // Initialize the media object.
+  res = mMedia->Init(aConfiguration.getStunServers(),
+                     aConfiguration.getTurnServers(),
+                     aConfiguration.getIceTransportPolicy());
+  if (NS_FAILED(res)) {
+    CSFLogError(logTag, "%s: Couldn't initialize media object", __FUNCTION__);
+    return res;
+  }
+
+  // Register in the global table (removed again in the destructor).
+  PeerConnectionCtx::GetInstance()->mPeerConnections[mHandle] = this;
+
+  mJsepSession = MakeUnique<JsepSessionImpl>(mName,
+                                             MakeUnique<PCUuidGenerator>());
+
+  res = mJsepSession->Init();
+  if (NS_FAILED(res)) {
+    CSFLogError(logTag, "%s: Couldn't init JSEP Session, res=%u",
+                        __FUNCTION__,
+                        static_cast<unsigned>(res));
+    return res;
+  }
+
+  res = mJsepSession->SetIceCredentials(mMedia->ice_ctx()->ufrag(),
+                                        mMedia->ice_ctx()->pwd());
+  if (NS_FAILED(res)) {
+    CSFLogError(logTag, "%s: Couldn't set ICE credentials, res=%u",
+                         __FUNCTION__,
+                         static_cast<unsigned>(res));
+    return res;
+  }
+
+#if defined(MOZILLA_EXTERNAL_LINKAGE)
+  {
+    // Test builds have no RTCCertificate; generate a DTLS identity directly.
+    mIdentity = DtlsIdentity::Generate();
+    if (!mIdentity) {
+      return NS_ERROR_FAILURE;
+    }
+
+    std::vector<uint8_t> fingerprint;
+    res = CalculateFingerprint(DtlsIdentity::DEFAULT_HASH_ALGORITHM,
+                               &fingerprint);
+    NS_ENSURE_SUCCESS(res, res);
+
+    res = mJsepSession->AddDtlsFingerprint(DtlsIdentity::DEFAULT_HASH_ALGORITHM,
+                                           fingerprint);
+    NS_ENSURE_SUCCESS(res, res);
+  }
+#endif
+
+  res = mJsepSession->SetBundlePolicy(aConfiguration.getBundlePolicy());
+  if (NS_FAILED(res)) {
+    CSFLogError(logTag, "%s: Couldn't set bundle policy, res=%u, error=%s",
+                        __FUNCTION__,
+                        static_cast<unsigned>(res),
+                        mJsepSession->GetLastError().c_str());
+    return res;
+  }
+
+  return NS_OK;
+}
+
+#ifndef MOZILLA_EXTERNAL_LINKAGE
+// WebIDL entry point: converts the RTCConfiguration, runs the core
+// Initialize(), then records any peer identity (which implies privacy).
+// Throws (via |rv|) on conversion or initialization failure.
+void
+PeerConnectionImpl::Initialize(PeerConnectionObserver& aObserver,
+                               nsGlobalWindow& aWindow,
+                               const RTCConfiguration& aConfiguration,
+                               nsISupports* aThread,
+                               ErrorResult &rv)
+{
+  MOZ_ASSERT(NS_IsMainThread());
+  MOZ_ASSERT(aThread);
+  mThread = do_QueryInterface(aThread);
+
+  PeerConnectionConfiguration converted;
+  nsresult res = converted.Init(aConfiguration);
+  if (NS_FAILED(res)) {
+    CSFLogError(logTag, "%s: Invalid RTCConfiguration", __FUNCTION__);
+    rv.Throw(res);
+    return;
+  }
+
+  res = Initialize(aObserver, &aWindow, converted, aThread);
+  if (NS_FAILED(res)) {
+    rv.Throw(res);
+    // Bug fix: bail out here. Previously we fell through and set
+    // mPeerIdentity/mPrivacyRequested on a half-initialized object.
+    return;
+  }
+
+  if (!aConfiguration.mPeerIdentity.IsEmpty()) {
+    mPeerIdentity = new PeerIdentity(aConfiguration.mPeerIdentity);
+    mPrivacyRequested = true;
+  }
+}
+#endif
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+// One-shot setter for the DTLS certificate: stores it, computes its
+// fingerprint, and registers the fingerprint with the JSEP session. On any
+// failure mCertificate is reset to null so Certificate() reflects the
+// unusable state. Errors are logged, not propagated.
+void
+PeerConnectionImpl::SetCertificate(mozilla::dom::RTCCertificate& aCertificate)
+{
+  PC_AUTO_ENTER_API_CALL_NO_CHECK();
+  MOZ_ASSERT(!mCertificate, "This can only be called once");
+  mCertificate = &aCertificate;
+
+  std::vector<uint8_t> fingerprint;
+  nsresult rv = CalculateFingerprint(DtlsIdentity::DEFAULT_HASH_ALGORITHM,
+                                     &fingerprint);
+  if (NS_FAILED(rv)) {
+    CSFLogError(logTag, "%s: Couldn't calculate fingerprint, rv=%u",
+                __FUNCTION__, static_cast<unsigned>(rv));
+    mCertificate = nullptr;
+    return;
+  }
+  rv = mJsepSession->AddDtlsFingerprint(DtlsIdentity::DEFAULT_HASH_ALGORITHM,
+                                        fingerprint);
+  if (NS_FAILED(rv)) {
+    CSFLogError(logTag, "%s: Couldn't set DTLS credentials, rv=%u",
+                __FUNCTION__, static_cast<unsigned>(rv));
+    mCertificate = nullptr;
+  }
+}
+
+// Accessor for the certificate installed by SetCertificate(); null before
+// a successful SetCertificate() call.
+const RefPtr<mozilla::dom::RTCCertificate>&
+PeerConnectionImpl::Certificate() const
+{
+  PC_AUTO_ENTER_API_CALL_NO_CHECK();
+  return mCertificate;
+}
+#endif
+
+// Returns the DTLS identity: derived from the RTCCertificate in full
+// builds, or the identity generated in Initialize() for external-linkage
+// (test) builds.
+RefPtr<DtlsIdentity>
+PeerConnectionImpl::Identity() const
+{
+  PC_AUTO_ENTER_API_CALL_NO_CHECK();
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+  MOZ_ASSERT(mCertificate);
+  return mCertificate->CreateDtlsIdentity();
+#else
+  RefPtr<DtlsIdentity> id = mIdentity;
+  return id;
+#endif
+}
+
+// Ordering functor for JSEP codec sorting: a codec whose default payload
+// type matches the user-preferred codec sorts first; otherwise strongly
+// preferred codecs sort ahead of the rest.
+class CompareCodecPriority {
+  public:
+  void SetPreferredCodec(int32_t preferredCodec) {
+    // This pref really ought to be a string, preferably something like
+    // "H264" or "VP8" instead of a payload type.
+    // Bug 1101259.
+    std::ostringstream os;
+    os << preferredCodec;
+    mPreferredCodec = os.str();
+  }
+
+  bool operator()(JsepCodecDescription* lhs,
+                  JsepCodecDescription* rhs) const {
+    const bool lhsMatchesPreference =
+        !mPreferredCodec.empty() &&
+        lhs->mDefaultPt == mPreferredCodec &&
+        rhs->mDefaultPt != mPreferredCodec;
+    return lhsMatchesPreference ||
+           (lhs->mStronglyPreferred && !rhs->mStronglyPreferred);
+  }
+
+  private:
+  std::string mPreferredCodec;
+};
+
+// Functor applied to every JSEP codec (via ForEachCodec): the constructor
+// snapshots all relevant media prefs (and probes OMX hardware H264 where
+// built); operator() then enables/disables each codec and applies level,
+// bitrate, fs/fr and feedback (TMMBR/REMB) constraints.
+class ConfigureCodec {
+  public:
+  explicit ConfigureCodec(nsCOMPtr<nsIPrefBranch>& branch) :
+    mHardwareH264Enabled(false),
+    mHardwareH264Supported(false),
+    mSoftwareH264Enabled(false),
+    mH264Enabled(false),
+    mVP9Enabled(false),
+    mH264Level(13), // minimum suggested for WebRTC spec
+    mH264MaxBr(0), // Unlimited
+    mH264MaxMbps(0), // Unlimited
+    mVP8MaxFs(0),
+    mVP8MaxFr(0),
+    mUseTmmbr(false),
+    mUseRemb(false),
+    mUseAudioFec(false),
+    mRedUlpfecEnabled(false),
+    mDtmfEnabled(false)
+  {
+#ifdef MOZ_WEBRTC_OMX
+    // Check to see if what HW codecs are available (not in use) at this moment.
+    // Note that streaming video decode can reserve a decoder
+
+    // XXX See bug 1018791 Implement W3 codec reservation policy
+    // Note that currently, OMXCodecReservation needs to be held by an sp<> because it puts
+    // 'this' into an sp<EventListener> to talk to the resource reservation code
+
+    // This pref is a misnomer; it is solely for h264 _hardware_ support.
+    branch->GetBoolPref("media.peerconnection.video.h264_enabled",
+                        &mHardwareH264Enabled);
+
+    if (mHardwareH264Enabled) {
+      // Ok, it is preffed on. Can we actually do it?
+      android::sp<android::OMXCodecReservation> encode = new android::OMXCodecReservation(true);
+      android::sp<android::OMXCodecReservation> decode = new android::OMXCodecReservation(false);
+
+      // Currently we just check if they're available right now, which will fail if we're
+      // trying to call ourself, for example. It will work for most real-world cases, like
+      // if we try to add a person to a 2-way call to make a 3-way mesh call
+      if (encode->ReserveOMXCodec() && decode->ReserveOMXCodec()) {
+        CSFLogDebug( logTag, "%s: H264 hardware codec available", __FUNCTION__);
+        mHardwareH264Supported = true;
+      }
+    }
+
+#endif // MOZ_WEBRTC_OMX
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+    mSoftwareH264Enabled = PeerConnectionCtx::GetInstance()->gmpHasH264();
+#else
+    // For unit-tests
+    mSoftwareH264Enabled = true;
+#endif
+
+    mH264Enabled = mHardwareH264Supported || mSoftwareH264Enabled;
+
+    // Only the low byte of the pref is a valid level value.
+    branch->GetIntPref("media.navigator.video.h264.level", &mH264Level);
+    mH264Level &= 0xFF;
+
+    branch->GetIntPref("media.navigator.video.h264.max_br", &mH264MaxBr);
+
+#ifdef MOZ_WEBRTC_OMX
+    // Level 1.2; but let's allow CIF@30 or QVGA@30+ by default
+    mH264MaxMbps = 11880;
+#endif
+
+    branch->GetIntPref("media.navigator.video.h264.max_mbps", &mH264MaxMbps);
+
+    branch->GetBoolPref("media.peerconnection.video.vp9_enabled",
+                        &mVP9Enabled);
+
+    branch->GetIntPref("media.navigator.video.max_fs", &mVP8MaxFs);
+    if (mVP8MaxFs <= 0) {
+      mVP8MaxFs = 12288; // We must specify something other than 0
+    }
+
+    branch->GetIntPref("media.navigator.video.max_fr", &mVP8MaxFr);
+    if (mVP8MaxFr <= 0) {
+      mVP8MaxFr = 60; // We must specify something other than 0
+    }
+
+    // TMMBR is enabled from a pref in about:config
+    branch->GetBoolPref("media.navigator.video.use_tmmbr", &mUseTmmbr);
+
+    // REMB is enabled by default, but can be disabled from about:config
+    branch->GetBoolPref("media.navigator.video.use_remb", &mUseRemb);
+
+    branch->GetBoolPref("media.navigator.audio.use_fec", &mUseAudioFec);
+
+    branch->GetBoolPref("media.navigator.video.red_ulpfec_enabled",
+                        &mRedUlpfecEnabled);
+
+    // media.peerconnection.dtmf.enabled controls both sdp generation for
+    // DTMF support as well as DTMF exposure to DOM
+    branch->GetBoolPref("media.peerconnection.dtmf.enabled", &mDtmfEnabled);
+  }
+
+  // Applies the snapshotted prefs to a single codec description.
+  void operator()(JsepCodecDescription* codec) const
+  {
+    switch (codec->mType) {
+      case SdpMediaSection::kAudio:
+        {
+          JsepAudioCodecDescription& audioCodec =
+            static_cast<JsepAudioCodecDescription&>(*codec);
+          if (audioCodec.mName == "opus") {
+            audioCodec.mFECEnabled = mUseAudioFec;
+          } else if (audioCodec.mName == "telephone-event") {
+            audioCodec.mEnabled = mDtmfEnabled;
+          }
+        }
+        break;
+      case SdpMediaSection::kVideo:
+        {
+          JsepVideoCodecDescription& videoCodec =
+            static_cast<JsepVideoCodecDescription&>(*codec);
+
+          if (videoCodec.mName == "H264") {
+            // Override level
+            videoCodec.mProfileLevelId &= 0xFFFF00;
+            videoCodec.mProfileLevelId |= mH264Level;
+
+            videoCodec.mConstraints.maxBr = mH264MaxBr;
+
+            videoCodec.mConstraints.maxMbps = mH264MaxMbps;
+
+            // Might disable it, but we set up other params anyway
+            videoCodec.mEnabled = mH264Enabled;
+
+            if (videoCodec.mPacketizationMode == 0 && !mSoftwareH264Enabled) {
+              // We're assuming packetization mode 0 is unsupported by
+              // hardware.
+              videoCodec.mEnabled = false;
+            }
+
+            if (mHardwareH264Supported) {
+              videoCodec.mStronglyPreferred = true;
+            }
+          } else if (videoCodec.mName == "red") {
+            videoCodec.mEnabled = mRedUlpfecEnabled;
+          } else if (videoCodec.mName == "ulpfec") {
+            videoCodec.mEnabled = mRedUlpfecEnabled;
+          } else if (videoCodec.mName == "VP8" || videoCodec.mName == "VP9") {
+            if (videoCodec.mName == "VP9" && !mVP9Enabled) {
+              videoCodec.mEnabled = false;
+              break;
+            }
+            videoCodec.mConstraints.maxFs = mVP8MaxFs;
+            videoCodec.mConstraints.maxFps = mVP8MaxFr;
+          }
+
+          if (mUseTmmbr) {
+            videoCodec.EnableTmmbr();
+          }
+          if (mUseRemb) {
+            videoCodec.EnableRemb();
+          }
+        }
+        break;
+      case SdpMediaSection::kText:
+      case SdpMediaSection::kApplication:
+      case SdpMediaSection::kMessage:
+        {} // Nothing to configure for these.
+    }
+  }
+
+  private:
+  bool mHardwareH264Enabled;
+  bool mHardwareH264Supported;
+  bool mSoftwareH264Enabled;
+  bool mH264Enabled;
+  bool mVP9Enabled;
+  int32_t mH264Level;
+  int32_t mH264MaxBr;
+  int32_t mH264MaxMbps;
+  int32_t mVP8MaxFs;
+  int32_t mVP8MaxFr;
+  bool mUseTmmbr;
+  bool mUseRemb;
+  bool mUseAudioFec;
+  bool mRedUlpfecEnabled;
+  bool mDtmfEnabled;
+};
+
+// Functor that prunes disabled video codecs' payload types from the RED
+// codec's redundant-encodings list, so RED never advertises redundancy for
+// a codec we will not negotiate.
+class ConfigureRedCodec {
+  public:
+  explicit ConfigureRedCodec(nsCOMPtr<nsIPrefBranch>& branch,
+                             std::vector<uint8_t>* redundantEncodings) :
+    mRedundantEncodings(redundantEncodings)
+  {
+    // if we wanted to override or modify which encodings are considered
+    // for redundant encodings, we'd probably want to handle it here by
+    // checking prefs modifying the operator() code below
+  }
+
+  void operator()(JsepCodecDescription* codec) const
+  {
+    // Only disabled video codecs are pruned from the list.
+    if (codec->mType != SdpMediaSection::kVideo || codec->mEnabled) {
+      return;
+    }
+    uint8_t pt = (uint8_t)strtoul(codec->mDefaultPt.c_str(), nullptr, 10);
+    // don't search for the codec payload type unless we have a valid
+    // conversion (non-zero)
+    if (pt == 0) {
+      return;
+    }
+    std::vector<uint8_t>::iterator it =
+      std::find(mRedundantEncodings->begin(),
+                mRedundantEncodings->end(),
+                pt);
+    if (it != mRedundantEncodings->end()) {
+      mRedundantEncodings->erase(it);
+    }
+  }
+
+  private:
+  std::vector<uint8_t>* mRedundantEncodings;
+};
+
+// Applies pref-driven codec configuration to the JSEP session: runs
+// ConfigureCodec over every codec, prunes the RED codec's redundant
+// encodings via ConfigureRedCodec, then sorts codecs by user preference and
+// strong preference. Returns a failure only if the pref service/branch is
+// unavailable.
+nsresult
+PeerConnectionImpl::ConfigureJsepSessionCodecs() {
+  nsresult res;
+  nsCOMPtr<nsIPrefService> prefs =
+    do_GetService("@mozilla.org/preferences-service;1", &res);
+
+  if (NS_FAILED(res)) {
+    CSFLogError(logTag, "%s: Couldn't get prefs service, res=%u",
+                __FUNCTION__,
+                static_cast<unsigned>(res));
+    return res;
+  }
+
+  nsCOMPtr<nsIPrefBranch> branch = do_QueryInterface(prefs);
+  if (!branch) {
+    CSFLogError(logTag, "%s: Couldn't get prefs branch", __FUNCTION__);
+    return NS_ERROR_FAILURE;
+  }
+
+  ConfigureCodec configurer(branch);
+  mJsepSession->ForEachCodec(configurer);
+
+  // first find the red codec description
+  std::vector<JsepCodecDescription*>& codecs = mJsepSession->Codecs();
+  JsepVideoCodecDescription* redCodec = nullptr;
+  for (auto codec : codecs) {
+    // we only really care about finding the RED codec if it is
+    // enabled
+    if (codec->mName == "red" && codec->mEnabled) {
+      redCodec = static_cast<JsepVideoCodecDescription*>(codec);
+      break;
+    }
+  }
+  // if red codec was found, configure it for the other enabled codecs
+  if (redCodec) {
+    ConfigureRedCodec configureRed(branch, &(redCodec->mRedundantEncodings));
+    mJsepSession->ForEachCodec(configureRed);
+  }
+
+  // We use this to sort the list of codecs once everything is configured
+  CompareCodecPriority comparator;
+
+  // Sort by priority
+  int32_t preferredCodec = 0;
+  branch->GetIntPref("media.navigator.video.preferred_codec",
+                     &preferredCodec);
+
+  // 0 (the default) means "no preference".
+  if (preferredCodec) {
+    comparator.SetPreferredCodec(preferredCodec);
+  }
+
+  mJsepSession->SortCodecs(comparator);
+  return NS_OK;
+}
+
+// Data channels won't work without a window, so in order for the C++ unit
+// tests to work (it doesn't have a window available) we ifdef the following
+// two implementations.
+// Lazily creates and initializes mDataConnection (SCTP data-channel
+// transport). Idempotent: a second call is a no-op; aNumstreams is only
+// advisory since streams grow dynamically. Compiled out (returns NS_OK
+// without doing anything) under MOZILLA_EXTERNAL_LINKAGE.
+NS_IMETHODIMP
+PeerConnectionImpl::EnsureDataConnection(uint16_t aNumstreams)
+{
+  PC_AUTO_ENTER_API_CALL(false);
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+  if (mDataConnection) {
+    CSFLogDebug(logTag,"%s DataConnection already connected",__FUNCTION__);
+    // Ignore the request to connect when already connected.  This entire
+    // implementation is temporary.  Ignore aNumstreams as it's merely advisory
+    // and we increase the number of streams dynamically as needed.
+    return NS_OK;
+  }
+  mDataConnection = new DataChannelConnection(this);
+  // 5000 is the local SCTP port; it matches the localport used in
+  // InitializeDataChannel().
+  if (!mDataConnection->Init(5000, aNumstreams, true)) {
+    CSFLogError(logTag,"%s DataConnection Init Failed",__FUNCTION__);
+    return NS_ERROR_FAILURE;
+  }
+  CSFLogDebug(logTag,"%s DataChannelConnection %p attached to %s",
+              __FUNCTION__, (void*) mDataConnection.get(), mHandle.c_str());
+#endif
+  return NS_OK;
+}
+
+// Scans the negotiated track pairs for an m=application section carrying
+// the "webrtc-datachannel" codec. On a match, sets *datachannelCodec and
+// *level (the bundle level if bundled, else the pair's own level) and
+// returns NS_OK. When no data channel was negotiated, returns NS_OK with
+// *datachannelCodec == nullptr and *level == 0. Returns failure only for
+// malformed negotiation results.
+nsresult
+PeerConnectionImpl::GetDatachannelParameters(
+    const mozilla::JsepApplicationCodecDescription** datachannelCodec,
+    uint16_t* level) const {
+
+  auto trackPairs = mJsepSession->GetNegotiatedTrackPairs();
+  for (auto j = trackPairs.begin(); j != trackPairs.end(); ++j) {
+    JsepTrackPair& trackPair = *j;
+
+    bool sendDataChannel =
+      trackPair.mSending &&
+      trackPair.mSending->GetMediaType() == SdpMediaSection::kApplication;
+    bool recvDataChannel =
+      trackPair.mReceiving &&
+      trackPair.mReceiving->GetMediaType() == SdpMediaSection::kApplication;
+    // recvDataChannel is only used by the assertion below; the (void) cast
+    // silences unused-variable warnings in release builds.
+    (void)recvDataChannel;
+    MOZ_ASSERT(sendDataChannel == recvDataChannel);
+
+    if (sendDataChannel) {
+      // This will release assert if there is no such index, and that's ok
+      const JsepTrackEncoding& encoding =
+        trackPair.mSending->GetNegotiatedDetails()->GetEncoding(0);
+
+      if (encoding.GetCodecs().empty()) {
+        CSFLogError(logTag, "%s: Negotiated m=application with no codec. "
+                            "This is likely to be broken.",
+                            __FUNCTION__);
+        return NS_ERROR_FAILURE;
+      }
+
+      for (const JsepCodecDescription* codec : encoding.GetCodecs()) {
+        if (codec->mType != SdpMediaSection::kApplication) {
+          CSFLogError(logTag, "%s: Codec type for m=application was %u, this "
+                              "is a bug.",
+                              __FUNCTION__,
+                              static_cast<unsigned>(codec->mType));
+          MOZ_ASSERT(false, "Codec for m=application was not \"application\"");
+          return NS_ERROR_FAILURE;
+        }
+
+        if (codec->mName != "webrtc-datachannel") {
+          CSFLogWarn(logTag, "%s: Codec for m=application was not "
+                             "webrtc-datachannel (was instead %s). ",
+                             __FUNCTION__,
+                             codec->mName.c_str());
+          continue;
+        }
+
+        *datachannelCodec =
+          static_cast<const JsepApplicationCodecDescription*>(codec);
+        if (trackPair.mBundleLevel.isSome()) {
+          *level = static_cast<uint16_t>(*trackPair.mBundleLevel);
+        } else {
+          *level = static_cast<uint16_t>(trackPair.mLevel);
+        }
+        return NS_OK;
+      }
+    }
+  }
+
+  *datachannelCodec = nullptr;
+  *level = 0;
+  return NS_OK;
+}
+
+/* static */
+// Queued-operation trampoline: re-resolves the PeerConnection by handle
+// (it may have been destroyed while queued) and forwards to
+// AddTrackToJsepSession. The nsresult from that call is intentionally
+// dropped — there is no caller left to report it to.
+void
+PeerConnectionImpl::DeferredAddTrackToJsepSession(
+    const std::string& pcHandle,
+    SdpMediaSection::MediaType type,
+    const std::string& streamId,
+    const std::string& trackId)
+{
+  PeerConnectionWrapper wrapper(pcHandle);
+
+  if (wrapper.impl()) {
+    if (!PeerConnectionCtx::GetInstance()->isReady()) {
+      MOZ_CRASH("Why is DeferredAddTrackToJsepSession being executed when the "
+                "PeerConnectionCtx isn't ready?");
+    }
+    wrapper.impl()->AddTrackToJsepSession(type, streamId, trackId);
+  }
+}
+
+// Configures codecs (prefs may have changed since setup) and then adds a
+// new sending JsepTrack for the given stream/track ids to the JSEP
+// session. Returns NS_ERROR_FAILURE (after logging the session's last
+// error) if the session rejects the track.
+nsresult
+PeerConnectionImpl::AddTrackToJsepSession(SdpMediaSection::MediaType type,
+                                          const std::string& streamId,
+                                          const std::string& trackId)
+{
+  nsresult res = ConfigureJsepSessionCodecs();
+  if (NS_FAILED(res)) {
+    CSFLogError(logTag, "Failed to configure codecs");
+    return res;
+  }
+
+  res = mJsepSession->AddTrack(
+      new JsepTrack(type, streamId, trackId, sdp::kSend));
+
+  if (NS_FAILED(res)) {
+    std::string errorString = mJsepSession->GetLastError();
+    // NOTE(review): the log label assumes type is audio or video; an
+    // application track would be logged as "video" here.
+    CSFLogError(logTag, "%s (%s) : pc = %s, error = %s",
+                __FUNCTION__,
+                type == SdpMediaSection::kAudio ? "audio" : "video",
+                mHandle.c_str(),
+                errorString.c_str());
+    return NS_ERROR_FAILURE;
+  }
+
+  return NS_OK;
+}
+
+// Connects the SCTP data channel over the negotiated transport after
+// offer/answer: looks up the negotiated datachannel codec/level, ensures
+// the DataChannelConnection exists, and wires it to the TransportFlow at
+// that level. Returns NS_OK when no datachannel was negotiated.
+nsresult
+PeerConnectionImpl::InitializeDataChannel()
+{
+  PC_AUTO_ENTER_API_CALL(false);
+  CSFLogDebug(logTag, "%s", __FUNCTION__);
+
+  const JsepApplicationCodecDescription* codec;
+  uint16_t level;
+  nsresult rv = GetDatachannelParameters(&codec, &level);
+
+  NS_ENSURE_SUCCESS(rv, rv);
+
+  if (!codec) {
+    CSFLogDebug(logTag, "%s: We did not negotiate datachannel", __FUNCTION__);
+    return NS_OK;
+  }
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+  // Clamp the negotiated stream count to what the stack supports.
+  uint32_t channels = codec->mChannels;
+  if (channels > MAX_NUM_STREAMS) {
+    channels = MAX_NUM_STREAMS;
+  }
+
+  rv = EnsureDataConnection(channels);
+  if (NS_SUCCEEDED(rv)) {
+    uint16_t localport = 5000;
+    uint16_t remoteport = 0;
+    // The logic that reflects the remote payload type is what sets the remote
+    // port here.
+    if (!codec->GetPtAsInt(&remoteport)) {
+      // NOTE(review): this early return skips the Destroy()/null-out
+      // cleanup performed below on the other failure paths — confirm
+      // whether mDataConnection should be torn down here too.
+      return NS_ERROR_FAILURE;
+    }
+
+    // use the specified TransportFlow
+    RefPtr<TransportFlow> flow = mMedia->GetTransportFlow(level, false).get();
+    CSFLogDebug(logTag, "Transportflow[%u] = %p",
+                        static_cast<unsigned>(level), flow.get());
+    if (flow) {
+      if (mDataConnection->ConnectViaTransportFlow(flow,
+                                                   localport,
+                                                   remoteport)) {
+        return NS_OK;
+      }
+    }
+    // If we inited the DataConnection, call Destroy() before releasing it
+    mDataConnection->Destroy();
+  }
+  mDataConnection = nullptr;
+#endif
+  return NS_ERROR_FAILURE;
+}
+
+// WebIDL-binding overload: forwards to the XPCOM CreateDataChannel below
+// and hands the resulting nsDOMDataChannel back as an already_AddRefed.
+// Returns nullptr in external-linkage (unit test) builds, which have no
+// window and therefore no data channels.
+already_AddRefed<nsDOMDataChannel>
+PeerConnectionImpl::CreateDataChannel(const nsAString& aLabel,
+                                      const nsAString& aProtocol,
+                                      uint16_t aType,
+                                      bool outOfOrderAllowed,
+                                      uint16_t aMaxTime,
+                                      uint16_t aMaxNum,
+                                      bool aExternalNegotiated,
+                                      uint16_t aStream,
+                                      ErrorResult &rv)
+{
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+  RefPtr<nsDOMDataChannel> result;
+  rv = CreateDataChannel(aLabel, aProtocol, aType, outOfOrderAllowed,
+                         aMaxTime, aMaxNum, aExternalNegotiated,
+                         aStream, getter_AddRefs(result));
+  return result.forget();
+#else
+  return nullptr;
+#endif
+}
+
+// Opens a DataChannel on the (lazily created) DataChannelConnection and
+// wraps it in an nsDOMDataChannel returned via *aRetval. On the first
+// channel, also adds an m=application JsepTrack with freshly generated
+// stream/track UUIDs and fires negotiationneeded.
+NS_IMETHODIMP
+PeerConnectionImpl::CreateDataChannel(const nsAString& aLabel,
+                                      const nsAString& aProtocol,
+                                      uint16_t aType,
+                                      bool outOfOrderAllowed,
+                                      uint16_t aMaxTime,
+                                      uint16_t aMaxNum,
+                                      bool aExternalNegotiated,
+                                      uint16_t aStream,
+                                      nsDOMDataChannel** aRetval)
+{
+  PC_AUTO_ENTER_API_CALL(false);
+  MOZ_ASSERT(aRetval);
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+  RefPtr<DataChannel> dataChannel;
+  DataChannelConnection::Type theType =
+    static_cast<DataChannelConnection::Type>(aType);
+
+  nsresult rv = EnsureDataConnection(WEBRTC_DATACHANNEL_STREAMS_DEFAULT);
+  if (NS_FAILED(rv)) {
+    return rv;
+  }
+  // For partial-reliability types, the third numeric argument carries the
+  // retransmit count or lifetime (ms); 0 means fully reliable.
+  dataChannel = mDataConnection->Open(
+    NS_ConvertUTF16toUTF8(aLabel), NS_ConvertUTF16toUTF8(aProtocol), theType,
+    !outOfOrderAllowed,
+    aType == DataChannelConnection::PARTIAL_RELIABLE_REXMIT ? aMaxNum :
+    (aType == DataChannelConnection::PARTIAL_RELIABLE_TIMED ? aMaxTime : 0),
+    nullptr, nullptr, aExternalNegotiated, aStream
+  );
+  NS_ENSURE_TRUE(dataChannel,NS_ERROR_FAILURE);
+
+  CSFLogDebug(logTag, "%s: making DOMDataChannel", __FUNCTION__);
+
+  if (!mHaveDataStream) {
+
+    std::string streamId;
+    std::string trackId;
+
+    // Generate random ids because these aren't linked to any local streams.
+    if (!mUuidGen->Generate(&streamId)) {
+      return NS_ERROR_FAILURE;
+    }
+    if (!mUuidGen->Generate(&trackId)) {
+      return NS_ERROR_FAILURE;
+    }
+
+    RefPtr<JsepTrack> track(new JsepTrack(
+          mozilla::SdpMediaSection::kApplication,
+          streamId,
+          trackId,
+          sdp::kSend));
+
+    rv = mJsepSession->AddTrack(track);
+    if (NS_FAILED(rv)) {
+      CSFLogError(logTag, "%s: Failed to add application track.",
+                          __FUNCTION__);
+      return rv;
+    }
+    mHaveDataStream = true;
+    OnNegotiationNeeded();
+  }
+  nsIDOMDataChannel *retval;
+  // NS_NewDOMDataChannel hands back an owning raw pointer; ownership is
+  // transferred to the caller through *aRetval.
+  rv = NS_NewDOMDataChannel(dataChannel.forget(), mWindow, &retval);
+  if (NS_FAILED(rv)) {
+    return rv;
+  }
+  *aRetval = static_cast<nsDOMDataChannel*>(retval);
+#endif
+  // NOTE(review): in MOZILLA_EXTERNAL_LINKAGE builds this returns NS_OK
+  // without ever assigning *aRetval — callers must not rely on it there.
+  return NS_OK;
+}
+
+// do_QueryObjectReferent() - Helps get PeerConnectionObserver from nsWeakPtr.
+//
+// nsWeakPtr deals in XPCOM interfaces, while webidl bindings are concrete objs.
+// TODO: Turn this into a central (template) function somewhere (Bug 939178)
+//
+// Without it, each weak-ref call in this file would look like this:
+//
+// nsCOMPtr<nsISupportsWeakReference> tmp = do_QueryReferent(mPCObserver);
+// if (!tmp) {
+// return;
+// }
+// RefPtr<nsSupportsWeakReference> tmp2 = do_QueryObject(tmp);
+// RefPtr<PeerConnectionObserver> pco = static_cast<PeerConnectionObserver*>(&*tmp2);
+
+// Resolves the weak reference to the PeerConnectionObserver webidl object.
+// Returns nullptr if the referent is gone. See the comment block above for
+// why the two-step QI/QueryObject dance is needed (Bug 939178).
+static already_AddRefed<PeerConnectionObserver>
+do_QueryObjectReferent(nsIWeakReference* aRawPtr) {
+  nsCOMPtr<nsISupportsWeakReference> tmp = do_QueryReferent(aRawPtr);
+  if (!tmp) {
+    return nullptr;
+  }
+  RefPtr<nsSupportsWeakReference> tmp2 = do_QueryObject(tmp);
+  RefPtr<PeerConnectionObserver> tmp3 = static_cast<PeerConnectionObserver*>(&*tmp2);
+  return tmp3.forget();
+}
+
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+// Not a member function so that we don't need to keep the PC live.
+// Runs on main thread: delivers the datachannel event to the observer and
+// then tells the DataChannel machinery the app is ready to receive data.
+static void NotifyDataChannel_m(RefPtr<nsIDOMDataChannel> aChannel,
+                                RefPtr<PeerConnectionObserver> aObserver)
+{
+  MOZ_ASSERT(NS_IsMainThread());
+  JSErrorResult rv;
+  RefPtr<nsDOMDataChannel> channel = static_cast<nsDOMDataChannel*>(&*aChannel);
+  aObserver->NotifyDataChannel(*channel, rv);
+  NS_DataChannelAppReady(aChannel);
+}
+#endif
+
+// Called by the DataChannelConnection when the remote side opens a channel.
+// Wraps the DataChannel in a DOM object and dispatches notification to the
+// observer on the main thread.
+void
+PeerConnectionImpl::NotifyDataChannel(already_AddRefed<DataChannel> aChannel)
+{
+  PC_AUTO_ENTER_API_CALL_NO_CHECK();
+
+  // XXXkhuey this is completely fucked up.  We can't use RefPtr<DataChannel>
+  // here because DataChannel's AddRef/Release are non-virtual and not visible
+  // if !MOZILLA_INTERNAL_API, but this function leaks the DataChannel if
+  // !MOZILLA_INTERNAL_API because it never transfers the ref to
+  // NS_NewDOMDataChannel.
+  DataChannel* channel = aChannel.take();
+  MOZ_ASSERT(channel);
+
+  CSFLogDebug(logTag, "%s: channel: %p", __FUNCTION__, channel);
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+  nsCOMPtr<nsIDOMDataChannel> domchannel;
+  // Re-wrap the taken raw pointer so NS_NewDOMDataChannel adopts the ref.
+  nsresult rv = NS_NewDOMDataChannel(already_AddRefed<DataChannel>(channel),
+                                     mWindow, getter_AddRefs(domchannel));
+  NS_ENSURE_SUCCESS_VOID(rv);
+
+  mHaveDataStream = true;
+
+  RefPtr<PeerConnectionObserver> pco = do_QueryObjectReferent(mPCObserver);
+  if (!pco) {
+    return;
+  }
+
+  RUN_ON_THREAD(mThread,
+                WrapRunnableNM(NotifyDataChannel_m,
+                               domchannel.get(),
+                               pco),
+                NS_DISPATCH_NORMAL);
+#endif
+}
+
+// WebIDL entry point: translates RTCOfferOptions into JsepOfferOptions and
+// delegates to the JsepOfferOptions overload below. Unpassed dictionary
+// members are simply left unset (Nothing) in the Jsep options.
+NS_IMETHODIMP
+PeerConnectionImpl::CreateOffer(const RTCOfferOptions& aOptions)
+{
+  JsepOfferOptions options;
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+  // convert the RTCOfferOptions to JsepOfferOptions
+  if (aOptions.mOfferToReceiveAudio.WasPassed()) {
+    options.mOfferToReceiveAudio =
+      mozilla::Some(size_t(aOptions.mOfferToReceiveAudio.Value()));
+  }
+
+  if (aOptions.mOfferToReceiveVideo.WasPassed()) {
+    options.mOfferToReceiveVideo =
+      mozilla::Some(size_t(aOptions.mOfferToReceiveVideo.Value()));
+  }
+
+  options.mIceRestart = mozilla::Some(aOptions.mIceRestart);
+
+  if (aOptions.mMozDontOfferDataChannel.WasPassed()) {
+    options.mDontOfferDataChannel =
+      mozilla::Some(aOptions.mMozDontOfferDataChannel.Value());
+  }
+#endif
+  return CreateOffer(options);
+}
+
+// Queued-operation trampoline used when CreateOffer arrives before the
+// PeerConnectionCtx is ready; re-resolves the PC by handle and retries.
+static void DeferredCreateOffer(const std::string& aPcHandle,
+                                const JsepOfferOptions& aOptions) {
+  PeerConnectionWrapper wrapper(aPcHandle);
+
+  if (wrapper.impl()) {
+    if (!PeerConnectionCtx::GetInstance()->isReady()) {
+      MOZ_CRASH("Why is DeferredCreateOffer being executed when the "
+                "PeerConnectionCtx isn't ready?");
+    }
+    wrapper.impl()->CreateOffer(aOptions);
+  }
+}
+
+// Used by unit tests and the IDL CreateOffer.
+// Core CreateOffer: handles ICE-restart bookkeeping (rollback/finalize of
+// any in-flight restart, setting up a new one when requested), configures
+// codecs, asks the JSEP session for an offer SDP, and reports success or
+// failure to the observer. Defers itself if the ctx isn't ready yet.
+NS_IMETHODIMP
+PeerConnectionImpl::CreateOffer(const JsepOfferOptions& aOptions)
+{
+  PC_AUTO_ENTER_API_CALL(true);
+  bool restartIce = aOptions.mIceRestart.isSome() && *(aOptions.mIceRestart);
+  // A non-restart offer while a provisional restart is pending implicitly
+  // abandons that restart.
+  if (!restartIce &&
+      mMedia->GetIceRestartState() ==
+          PeerConnectionMedia::ICE_RESTART_PROVISIONAL) {
+    RollbackIceRestart();
+  }
+
+  RefPtr<PeerConnectionObserver> pco = do_QueryObjectReferent(mPCObserver);
+  if (!pco) {
+    return NS_OK;
+  }
+
+  if (!PeerConnectionCtx::GetInstance()->isReady()) {
+    // Uh oh. We're not ready yet. Enqueue this operation.
+    PeerConnectionCtx::GetInstance()->queueJSEPOperation(
+        WrapRunnableNM(DeferredCreateOffer, mHandle, aOptions));
+    STAMP_TIMECARD(mTimeCard, "Deferring CreateOffer (not ready)");
+    return NS_OK;
+  }
+
+  CSFLogDebug(logTag, "CreateOffer()");
+
+  nsresult nrv;
+  if (restartIce && !mJsepSession->GetLocalDescription().empty()) {
+    // If restart is requested and a restart is already in progress, we
+    // need to make room for the restart request so we either rollback
+    // or finalize to "clear" the previous restart.
+    if (mMedia->GetIceRestartState() ==
+            PeerConnectionMedia::ICE_RESTART_PROVISIONAL) {
+      // we're mid-restart and can rollback
+      RollbackIceRestart();
+    } else if (mMedia->GetIceRestartState() ==
+                   PeerConnectionMedia::ICE_RESTART_COMMITTED) {
+      // we're mid-restart and can't rollback, finalize restart even
+      // though we're not really ready yet
+      FinalizeIceRestart();
+    }
+
+    CSFLogInfo(logTag, "Offerer restarting ice");
+    nrv = SetupIceRestart();
+    if (NS_FAILED(nrv)) {
+      CSFLogError(logTag, "%s: SetupIceRestart failed, res=%u",
+                           __FUNCTION__,
+                           static_cast<unsigned>(nrv));
+      return nrv;
+    }
+  }
+
+  nrv = ConfigureJsepSessionCodecs();
+  if (NS_FAILED(nrv)) {
+    CSFLogError(logTag, "Failed to configure codecs");
+    return nrv;
+  }
+
+  STAMP_TIMECARD(mTimeCard, "Create Offer");
+
+  std::string offer;
+
+  nrv = mJsepSession->CreateOffer(aOptions, &offer);
+  JSErrorResult rv;
+  if (NS_FAILED(nrv)) {
+    // Map JSEP failure codes onto the observer-facing error enum.
+    Error error;
+    switch (nrv) {
+      case NS_ERROR_UNEXPECTED:
+        error = kInvalidState;
+        break;
+      default:
+        error = kInternalError;
+    }
+    std::string errorString = mJsepSession->GetLastError();
+
+    CSFLogError(logTag, "%s: pc = %s, error = %s",
+                __FUNCTION__, mHandle.c_str(), errorString.c_str());
+    pco->OnCreateOfferError(error, ObString(errorString.c_str()), rv);
+  } else {
+    pco->OnCreateOfferSuccess(ObString(offer.c_str()), rv);
+  }
+
+  UpdateSignalingState();
+  return NS_OK;
+}
+
+// Creates an answer SDP. If the remote offer requested an ICE restart,
+// either finalizes a committed restart or sets up a new one before
+// answering. Success/failure is delivered asynchronously to the observer.
+NS_IMETHODIMP
+PeerConnectionImpl::CreateAnswer()
+{
+  PC_AUTO_ENTER_API_CALL(true);
+
+  RefPtr<PeerConnectionObserver> pco = do_QueryObjectReferent(mPCObserver);
+  if (!pco) {
+    return NS_OK;
+  }
+
+  CSFLogDebug(logTag, "CreateAnswer()");
+
+  nsresult nrv;
+  if (mJsepSession->RemoteIceIsRestarting()) {
+    if (mMedia->GetIceRestartState() ==
+            PeerConnectionMedia::ICE_RESTART_COMMITTED) {
+      FinalizeIceRestart();
+    } else if (!mMedia->IsIceRestarting()) {
+      CSFLogInfo(logTag, "Answerer restarting ice");
+      nrv = SetupIceRestart();
+      if (NS_FAILED(nrv)) {
+        CSFLogError(logTag, "%s: SetupIceRestart failed, res=%u",
+                             __FUNCTION__,
+                             static_cast<unsigned>(nrv));
+        return nrv;
+      }
+    }
+  }
+
+  STAMP_TIMECARD(mTimeCard, "Create Answer");
+  // TODO(bug 1098015): Once RTCAnswerOptions is standardized, we'll need to
+  // add it as a param to CreateAnswer, and convert it here.
+  JsepAnswerOptions options;
+  std::string answer;
+
+  nrv = mJsepSession->CreateAnswer(options, &answer);
+  JSErrorResult rv;
+  if (NS_FAILED(nrv)) {
+    // Map JSEP failure codes onto the observer-facing error enum.
+    Error error;
+    switch (nrv) {
+      case NS_ERROR_UNEXPECTED:
+        error = kInvalidState;
+        break;
+      default:
+        error = kInternalError;
+    }
+    std::string errorString = mJsepSession->GetLastError();
+
+    CSFLogError(logTag, "%s: pc = %s, error = %s",
+                __FUNCTION__, mHandle.c_str(), errorString.c_str());
+    pco->OnCreateAnswerError(error, ObString(errorString.c_str()), rv);
+  } else {
+    pco->OnCreateAnswerSuccess(ObString(answer.c_str()), rv);
+  }
+
+  UpdateSignalingState();
+
+  return NS_OK;
+}
+
+// Begins an ICE restart: generates fresh ufrag/pwd, stashes the current
+// JSEP credentials for possible rollback, and installs the new ones in
+// both PCMedia and the JSEP session.
+nsresult
+PeerConnectionImpl::SetupIceRestart()
+{
+  if (mMedia->IsIceRestarting()) {
+    CSFLogError(logTag, "%s: ICE already restarting",
+                         __FUNCTION__);
+    return NS_ERROR_UNEXPECTED;
+  }
+
+  std::string ufrag = mMedia->ice_ctx()->GetNewUfrag();
+  std::string pwd = mMedia->ice_ctx()->GetNewPwd();
+  if (ufrag.empty() || pwd.empty()) {
+    // NOTE(review): this logs the ICE password in cleartext on the error
+    // path — consider whether credentials belong in the log.
+    CSFLogError(logTag, "%s: Bad ICE credentials (ufrag:'%s'/pwd:'%s')",
+                         __FUNCTION__,
+                         ufrag.c_str(), pwd.c_str());
+    return NS_ERROR_UNEXPECTED;
+  }
+
+  // hold on to the current ice creds in case of rollback
+  mPreviousIceUfrag = mJsepSession->GetUfrag();
+  mPreviousIcePwd = mJsepSession->GetPwd();
+  mMedia->BeginIceRestart(ufrag, pwd);
+
+  nsresult nrv = mJsepSession->SetIceCredentials(ufrag, pwd);
+  if (NS_FAILED(nrv)) {
+    CSFLogError(logTag, "%s: Couldn't set ICE credentials, res=%u",
+                         __FUNCTION__,
+                         static_cast<unsigned>(nrv));
+    return nrv;
+  }
+
+  return NS_OK;
+}
+
+// Abandons an in-progress (provisional) ICE restart: tells PCMedia to roll
+// back and restores the pre-restart ufrag/pwd in the JSEP session.
+nsresult
+PeerConnectionImpl::RollbackIceRestart()
+{
+  mMedia->RollbackIceRestart();
+  // put back the previous ice creds
+  nsresult nrv = mJsepSession->SetIceCredentials(mPreviousIceUfrag,
+                                                 mPreviousIcePwd);
+  if (NS_FAILED(nrv)) {
+    CSFLogError(logTag, "%s: Couldn't set ICE credentials, res=%u",
+                         __FUNCTION__,
+                         static_cast<unsigned>(nrv));
+    // NOTE(review): mPreviousIceUfrag/Pwd are intentionally left intact on
+    // this path (the restore did not happen), unlike the success path below.
+    return nrv;
+  }
+  mPreviousIceUfrag = "";
+  mPreviousIcePwd = "";
+
+  return NS_OK;
+}
+
+// Commits an ICE restart permanently; the stashed pre-restart credentials
+// can no longer be rolled back to, so drop them.
+void
+PeerConnectionImpl::FinalizeIceRestart()
+{
+  mMedia->FinalizeIceRestart();
+  // clear the previous ice creds since they are no longer needed
+  mPreviousIceUfrag = "";
+  mPreviousIcePwd = "";
+}
+
+// Applies a local SDP (offer/answer/pranswer/rollback) to the JSEP session
+// and reports the outcome to the observer. Also latches privacy-requested
+// state if any local track carries a peer identity.
+NS_IMETHODIMP
+PeerConnectionImpl::SetLocalDescription(int32_t aAction, const char* aSDP)
+{
+  PC_AUTO_ENTER_API_CALL(true);
+
+  if (!aSDP) {
+    CSFLogError(logTag, "%s - aSDP is NULL", __FUNCTION__);
+    return NS_ERROR_FAILURE;
+  }
+
+  JSErrorResult rv;
+  RefPtr<PeerConnectionObserver> pco = do_QueryObjectReferent(mPCObserver);
+  if (!pco) {
+    return NS_OK;
+  }
+
+  STAMP_TIMECARD(mTimeCard, "Set Local Description");
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+  // Privacy, once requested, is sticky for the connection's lifetime.
+  bool isolated = mMedia->AnyLocalTrackHasPeerIdentity();
+  mPrivacyRequested = mPrivacyRequested || isolated;
+#endif
+
+  mLocalRequestedSDP = aSDP;
+
+  // Map the IDL action constant onto the JSEP SDP type.
+  JsepSdpType sdpType;
+  switch (aAction) {
+    case IPeerConnection::kActionOffer:
+      sdpType = mozilla::kJsepSdpOffer;
+      break;
+    case IPeerConnection::kActionAnswer:
+      sdpType = mozilla::kJsepSdpAnswer;
+      break;
+    case IPeerConnection::kActionPRAnswer:
+      sdpType = mozilla::kJsepSdpPranswer;
+      break;
+    case IPeerConnection::kActionRollback:
+      sdpType = mozilla::kJsepSdpRollback;
+      break;
+    default:
+      MOZ_ASSERT(false);
+      return NS_ERROR_FAILURE;
+
+  }
+  nsresult nrv = mJsepSession->SetLocalDescription(sdpType,
+                                                   mLocalRequestedSDP);
+  if (NS_FAILED(nrv)) {
+    Error error;
+    switch (nrv) {
+      case NS_ERROR_INVALID_ARG:
+        error = kInvalidSessionDescription;
+        break;
+      case NS_ERROR_UNEXPECTED:
+        error = kInvalidState;
+        break;
+      default:
+        error = kInternalError;
+    }
+
+    std::string errorString = mJsepSession->GetLastError();
+    CSFLogError(logTag, "%s: pc = %s, error = %s",
+                __FUNCTION__, mHandle.c_str(), errorString.c_str());
+    pco->OnSetLocalDescriptionError(error, ObString(errorString.c_str()), rv);
+  } else {
+    pco->OnSetLocalDescriptionSuccess(rv);
+  }
+
+  UpdateSignalingState(sdpType == mozilla::kJsepSdpRollback);
+  return NS_OK;
+}
+
+// Queued-operation trampoline for SetRemoteDescription when the ctx wasn't
+// ready; re-resolves the PC by handle and replays the call.
+static void DeferredSetRemote(const std::string& aPcHandle,
+                              int32_t aAction,
+                              const std::string& aSdp) {
+  PeerConnectionWrapper wrapper(aPcHandle);
+
+  if (wrapper.impl()) {
+    if (!PeerConnectionCtx::GetInstance()->isReady()) {
+      MOZ_CRASH("Why is DeferredSetRemote being executed when the "
+                "PeerConnectionCtx isn't ready?");
+    }
+    wrapper.impl()->SetRemoteDescription(aAction, aSdp.c_str());
+  }
+}
+
+// Dispatches a ControlMessage to the MediaStreamGraph that adds the given
+// track to aSource's SourceMediaStream, padding with null data up to the
+// stream's current time so the new track doesn't underrun. No-op in
+// external-linkage builds.
+static void StartTrack(MediaStream* aSource,
+                       TrackID aTrackId,
+                       nsAutoPtr<MediaSegment>&& aSegment) {
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+  class Message : public ControlMessage {
+   public:
+    Message(MediaStream* aStream,
+            TrackID aTrack,
+            nsAutoPtr<MediaSegment>&& aSegment)
+      : ControlMessage(aStream),
+        track_id_(aTrack),
+        segment_(aSegment) {}
+
+    virtual void Run() override {
+      // Audio tracks run at the fixed WebRTC rate; video uses graph rate.
+      TrackRate track_rate = segment_->GetType() == MediaSegment::AUDIO ?
+          WEBRTC_DEFAULT_SAMPLE_RATE : mStream->GraphRate();
+      StreamTime current_end = mStream->GetTracksEnd();
+      TrackTicks current_ticks =
+          mStream->TimeToTicksRoundUp(track_rate, current_end);
+
+      // Add a track 'now' to avoid possible underrun, especially if we add
+      // a track "later".
+
+      if (current_end != 0L) {
+        CSFLogDebug(logTag, "added track @ %u -> %f",
+                    static_cast<unsigned>(current_end),
+                    mStream->StreamTimeToSeconds(current_end));
+      }
+
+      // To avoid assertions, we need to insert a dummy segment that covers up
+      // to the "start" time for the track
+      segment_->AppendNullData(current_ticks);
+      if (segment_->GetType() == MediaSegment::AUDIO) {
+        mStream->AsSourceStream()->AddAudioTrack(
+            track_id_,
+            WEBRTC_DEFAULT_SAMPLE_RATE,
+            0,
+            static_cast<AudioSegment*>(segment_.forget()));
+      } else {
+        mStream->AsSourceStream()->AddTrack(track_id_, 0, segment_.forget());
+      }
+    }
+   private:
+    TrackID track_id_;
+    nsAutoPtr<MediaSegment> segment_;
+  };
+
+  aSource->GraphImpl()->AppendMessage(
+      MakeUnique<Message>(aSource, aTrackId, Move(aSegment)));
+  CSFLogInfo(logTag, "Dispatched track-add for track id %u on stream %p",
+             aTrackId, aSource);
+#endif
+}
+
+
+// After SetRemoteDescription succeeds: creates DOM streams/tracks for every
+// remote track newly added by negotiation (datachannel tracks excluded),
+// assigns principals/ids, starts the MSG tracks, and fires
+// OnAddTrack/OnAddStream at the observer. On internal errors the observer
+// is notified via OnSetRemoteDescriptionError and the error is returned.
+nsresult
+PeerConnectionImpl::CreateNewRemoteTracks(RefPtr<PeerConnectionObserver>& aPco)
+{
+  JSErrorResult jrv;
+
+  std::vector<RefPtr<JsepTrack>> newTracks =
+    mJsepSession->GetRemoteTracksAdded();
+
+  // Group new tracks by stream id
+  std::map<std::string, std::vector<RefPtr<JsepTrack>>> tracksByStreamId;
+  for (auto i = newTracks.begin(); i != newTracks.end(); ++i) {
+    RefPtr<JsepTrack> track = *i;
+
+    if (track->GetMediaType() == mozilla::SdpMediaSection::kApplication) {
+      // Ignore datachannel
+      continue;
+    }
+
+    tracksByStreamId[track->GetStreamId()].push_back(track);
+  }
+
+  for (auto i = tracksByStreamId.begin(); i != tracksByStreamId.end(); ++i) {
+    std::string streamId = i->first;
+    std::vector<RefPtr<JsepTrack>>& tracks = i->second;
+
+    bool newStream = false;
+    RefPtr<RemoteSourceStreamInfo> info =
+      mMedia->GetRemoteStreamById(streamId);
+    if (!info) {
+      newStream = true;
+      nsresult nrv = CreateRemoteSourceStreamInfo(&info, streamId);
+      if (NS_FAILED(nrv)) {
+        aPco->OnSetRemoteDescriptionError(
+            kInternalError,
+            ObString("CreateRemoteSourceStreamInfo failed"),
+            jrv);
+        return nrv;
+      }
+
+      nrv = mMedia->AddRemoteStream(info);
+      if (NS_FAILED(nrv)) {
+        aPco->OnSetRemoteDescriptionError(
+            kInternalError,
+            ObString("AddRemoteStream failed"),
+            jrv);
+        return nrv;
+      }
+
+      CSFLogDebug(logTag, "Added remote stream %s", info->GetId().c_str());
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+      info->GetMediaStream()->AssignId(NS_ConvertUTF8toUTF16(streamId.c_str()));
+      info->GetMediaStream()->SetLogicalStreamStartTime(
+          info->GetMediaStream()->GetPlaybackStream()->GetCurrentTime());
+#else
+      info->GetMediaStream()->AssignId((streamId));
+#endif
+    }
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+    Sequence<OwningNonNull<DOMMediaStream>> streams;
+    if (!streams.AppendElement(OwningNonNull<DOMMediaStream>(
+            *info->GetMediaStream()),
+            fallible)) {
+      MOZ_ASSERT(false);
+      return NS_ERROR_FAILURE;
+    }
+
+    // Set the principal used for creating the tracks. This makes the stream
+    // data (audio/video samples) accessible to the receiving page. We're
+    // only certain that privacy hasn't been requested if we're connected.
+    nsCOMPtr<nsIPrincipal> principal;
+    nsIDocument* doc = GetWindow()->GetExtantDoc();
+    MOZ_ASSERT(doc);
+    if (mDtlsConnected && !PrivacyRequested()) {
+      principal = doc->NodePrincipal();
+    } else {
+      // we're either certain that we need isolation for the streams, OR
+      // we're not sure and we can fix the stream in SetDtlsConnected
+      principal =  nsNullPrincipal::CreateWithInheritedAttributes(doc->NodePrincipal());
+    }
+#endif
+
+    // We need to select unique ids, just use max + 1
+    TrackID maxTrackId = 0;
+    {
+      nsTArray<RefPtr<dom::MediaStreamTrack>> domTracks;
+      info->GetMediaStream()->GetTracks(domTracks);
+      for (auto& track : domTracks) {
+        maxTrackId = std::max(maxTrackId, track->mTrackID);
+      }
+    }
+
+    for (RefPtr<JsepTrack>& track : tracks) {
+      std::string webrtcTrackId(track->GetTrackId());
+      if (!info->HasTrack(webrtcTrackId)) {
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+        RefPtr<RemoteTrackSource> source =
+          new RemoteTrackSource(principal, nsString());
+#else
+        RefPtr<MediaStreamTrackSource> source = new MediaStreamTrackSource();
+#endif
+        TrackID trackID = ++maxTrackId;
+        RefPtr<MediaStreamTrack> domTrack;
+        nsAutoPtr<MediaSegment> segment;
+        if (track->GetMediaType() == SdpMediaSection::kAudio) {
+          domTrack =
+            info->GetMediaStream()->CreateDOMTrack(trackID,
+                                                   MediaSegment::AUDIO,
+                                                   source);
+          info->GetMediaStream()->AddTrackInternal(domTrack);
+          segment = new AudioSegment;
+        } else {
+          domTrack =
+            info->GetMediaStream()->CreateDOMTrack(trackID,
+                                                   MediaSegment::VIDEO,
+                                                   source);
+          info->GetMediaStream()->AddTrackInternal(domTrack);
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+          segment = new VideoSegment;
+#endif
+        }
+
+        StartTrack(info->GetMediaStream()->GetInputStream()->AsSourceStream(),
+                   trackID, Move(segment));
+        info->AddTrack(webrtcTrackId, domTrack);
+        CSFLogDebug(logTag, "Added remote track %s/%s",
+                    info->GetId().c_str(), webrtcTrackId.c_str());
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+        domTrack->AssignId(NS_ConvertUTF8toUTF16(webrtcTrackId.c_str()));
+        aPco->OnAddTrack(*domTrack, streams, jrv);
+        if (jrv.Failed()) {
+          CSFLogError(logTag, ": OnAddTrack(%s) failed! Error: %u",
+                      webrtcTrackId.c_str(),
+                      jrv.ErrorCodeAsInt());
+        }
+#endif
+      }
+    }
+
+    if (newStream) {
+      aPco->OnAddStream(*info->GetMediaStream(), jrv);
+      if (jrv.Failed()) {
+        CSFLogError(logTag, ": OnAddStream() failed! Error: %u",
+                    jrv.ErrorCodeAsInt());
+      }
+    }
+  }
+  return NS_OK;
+}
+
+// After renegotiation: removes remote tracks that disappeared from the
+// remote description, firing OnRemoveTrack per track and OnRemoveStream
+// once a stream's last track is gone.
+void
+PeerConnectionImpl::RemoveOldRemoteTracks(RefPtr<PeerConnectionObserver>& aPco)
+{
+  JSErrorResult jrv;
+
+  std::vector<RefPtr<JsepTrack>> removedTracks =
+    mJsepSession->GetRemoteTracksRemoved();
+
+  for (auto i = removedTracks.begin(); i != removedTracks.end(); ++i) {
+    const std::string& streamId = (*i)->GetStreamId();
+    const std::string& trackId = (*i)->GetTrackId();
+
+    RefPtr<RemoteSourceStreamInfo> info = mMedia->GetRemoteStreamById(streamId);
+    if (!info) {
+      MOZ_ASSERT(false, "A stream/track was removed that wasn't in PCMedia. "
+                        "This is a bug.");
+      continue;
+    }
+
+    mMedia->RemoveRemoteTrack(streamId, trackId);
+
+    DOMMediaStream* stream = info->GetMediaStream();
+    nsTArray<RefPtr<MediaStreamTrack>> tracks;
+    stream->GetTracks(tracks);
+    for (auto& track : tracks) {
+      if (PeerConnectionImpl::GetTrackId(*track) == trackId) {
+        aPco->OnRemoveTrack(*track, jrv);
+        break;
+      }
+    }
+
+    // We might be holding the last ref, but that's ok.
+    if (!info->GetTrackCount()) {
+      aPco->OnRemoveStream(*stream, jrv);
+    }
+  }
+}
+
+// Applies a remote SDP to the JSEP session. Remote offers are deferred if
+// the ctx isn't ready and trigger a codec (re)configuration first. On
+// success, syncs remote track additions/removals to the DOM and notifies
+// the observer; on failure, maps the error and notifies the observer.
+NS_IMETHODIMP
+PeerConnectionImpl::SetRemoteDescription(int32_t action, const char* aSDP)
+{
+  PC_AUTO_ENTER_API_CALL(true);
+
+  if (!aSDP) {
+    CSFLogError(logTag, "%s - aSDP is NULL", __FUNCTION__);
+    return NS_ERROR_FAILURE;
+  }
+
+  JSErrorResult jrv;
+  RefPtr<PeerConnectionObserver> pco = do_QueryObjectReferent(mPCObserver);
+  if (!pco) {
+    return NS_OK;
+  }
+
+  if (action == IPeerConnection::kActionOffer) {
+    if (!PeerConnectionCtx::GetInstance()->isReady()) {
+      // Uh oh. We're not ready yet. Enqueue this operation. (This must be a
+      // remote offer, or else we would not have gotten this far)
+      PeerConnectionCtx::GetInstance()->queueJSEPOperation(
+          WrapRunnableNM(DeferredSetRemote,
+                         mHandle,
+                         action,
+                         std::string(aSDP)));
+      STAMP_TIMECARD(mTimeCard, "Deferring SetRemote (not ready)");
+      return NS_OK;
+    }
+
+    nsresult nrv = ConfigureJsepSessionCodecs();
+    if (NS_FAILED(nrv)) {
+      CSFLogError(logTag, "Failed to configure codecs");
+      return nrv;
+    }
+  }
+
+  STAMP_TIMECARD(mTimeCard, "Set Remote Description");
+
+  mRemoteRequestedSDP = aSDP;
+  // Map the IDL action constant onto the JSEP SDP type.
+  JsepSdpType sdpType;
+  switch (action) {
+    case IPeerConnection::kActionOffer:
+      sdpType = mozilla::kJsepSdpOffer;
+      break;
+    case IPeerConnection::kActionAnswer:
+      sdpType = mozilla::kJsepSdpAnswer;
+      break;
+    case IPeerConnection::kActionPRAnswer:
+      sdpType = mozilla::kJsepSdpPranswer;
+      break;
+    case IPeerConnection::kActionRollback:
+      sdpType = mozilla::kJsepSdpRollback;
+      break;
+    default:
+      MOZ_ASSERT(false);
+      return NS_ERROR_FAILURE;
+  }
+
+  nsresult nrv = mJsepSession->SetRemoteDescription(sdpType,
+                                                    mRemoteRequestedSDP);
+  if (NS_FAILED(nrv)) {
+    Error error;
+    switch (nrv) {
+      case NS_ERROR_INVALID_ARG:
+        error = kInvalidSessionDescription;
+        break;
+      case NS_ERROR_UNEXPECTED:
+        error = kInvalidState;
+        break;
+      default:
+        error = kInternalError;
+    }
+
+    std::string errorString = mJsepSession->GetLastError();
+    CSFLogError(logTag, "%s: pc = %s, error = %s",
+                __FUNCTION__, mHandle.c_str(), errorString.c_str());
+    pco->OnSetRemoteDescriptionError(error, ObString(errorString.c_str()), jrv);
+  } else {
+    nrv = CreateNewRemoteTracks(pco);
+    if (NS_FAILED(nrv)) {
+      // aPco was already notified, just return early.
+      return NS_OK;
+    }
+
+    RemoveOldRemoteTracks(pco);
+
+    pco->OnSetRemoteDescriptionSuccess(jrv);
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+    startCallTelem();
+#endif
+  }
+
+  UpdateSignalingState(sdpType == mozilla::kJsepSdpRollback);
+  return NS_OK;
+}
+
+// WebRTC uses highres time relative to the UNIX epoch (Jan 1, 1970, UTC).
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+// Returns the window's high-resolution time re-based to the UNIX epoch:
+// performance.now() plus navigationStart. Main thread only.
+nsresult
+PeerConnectionImpl::GetTimeSinceEpoch(DOMHighResTimeStamp *result) {
+  MOZ_ASSERT(NS_IsMainThread());
+  Performance *perf = mWindow->GetPerformance();
+  NS_ENSURE_TRUE(perf && perf->Timing(), NS_ERROR_UNEXPECTED);
+  *result = perf->Now() + perf->Timing()->NavigationStart();
+  return NS_OK;
+}
+
+// Helper subclass whose only job is to Construct() every optional stats
+// sequence of RTCStatsReportInternal up front and stamp the timestamp, so
+// later code can append to the members unconditionally.
+class RTCStatsReportInternalConstruct : public RTCStatsReportInternal {
+public:
+  RTCStatsReportInternalConstruct(const nsString &pcid, DOMHighResTimeStamp now) {
+    mPcid = pcid;
+    mInboundRTPStreamStats.Construct();
+    mOutboundRTPStreamStats.Construct();
+    mMediaStreamTrackStats.Construct();
+    mMediaStreamStats.Construct();
+    mTransportStats.Construct();
+    mIceComponentStats.Construct();
+    mIceCandidatePairStats.Construct();
+    mIceCandidateStats.Construct();
+    mCodecStats.Construct();
+    mTimestamp.Construct(now);
+  }
+};
+#endif
+
+// Kicks off an async stats query (optionally filtered to aSelector) by
+// building the query on the main thread and dispatching collection to the
+// STS thread; results are delivered via GetStatsForPCObserver_s.
+NS_IMETHODIMP
+PeerConnectionImpl::GetStats(MediaStreamTrack *aSelector) {
+  PC_AUTO_ENTER_API_CALL(true);
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+  if (!mMedia) {
+    // Since we zero this out before the d'tor, we should check.
+    return NS_ERROR_UNEXPECTED;
+  }
+
+  nsAutoPtr<RTCStatsQuery> query(new RTCStatsQuery(false));
+
+  nsresult rv = BuildStatsQuery_m(aSelector, query.get());
+
+  NS_ENSURE_SUCCESS(rv, rv);
+
+  RUN_ON_THREAD(mSTSThread,
+                WrapRunnableNM(&PeerConnectionImpl::GetStatsForPCObserver_s,
+                               mHandle,
+                               query),
+                NS_DISPATCH_NORMAL);
+#endif
+  return NS_OK;
+}
+
+// Adds a trickled remote ICE candidate to the JSEP session (and, once
+// signaling is stable, to PCMedia), recording trickle-arrival telemetry
+// relative to ICE start. The observer is told of success or the mapped
+// failure.
+NS_IMETHODIMP
+PeerConnectionImpl::AddIceCandidate(const char* aCandidate, const char* aMid, unsigned short aLevel) {
+  PC_AUTO_ENTER_API_CALL(true);
+
+  JSErrorResult rv;
+  RefPtr<PeerConnectionObserver> pco = do_QueryObjectReferent(mPCObserver);
+  if (!pco) {
+    return NS_OK;
+  }
+
+  STAMP_TIMECARD(mTimeCard, "Add Ice Candidate");
+
+  CSFLogDebug(logTag, "AddIceCandidate: %s", aCandidate);
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+  // When remote candidates are added before our ICE ctx is up and running
+  // (the transition to New is async through STS, so this is not impossible),
+  // we won't record them as trickle candidates. Is this what we want?
+  if(!mIceStartTime.IsNull()) {
+    TimeDuration timeDelta = TimeStamp::Now() - mIceStartTime;
+    if (mIceConnectionState == PCImplIceConnectionState::Failed) {
+      Telemetry::Accumulate(Telemetry::WEBRTC_ICE_LATE_TRICKLE_ARRIVAL_TIME,
+                            timeDelta.ToMilliseconds());
+    } else {
+      Telemetry::Accumulate(Telemetry::WEBRTC_ICE_ON_TIME_TRICKLE_ARRIVAL_TIME,
+                            timeDelta.ToMilliseconds());
+    }
+  }
+#endif
+
+  nsresult res = mJsepSession->AddRemoteIceCandidate(aCandidate, aMid, aLevel);
+
+  if (NS_SUCCEEDED(res)) {
+    // We do not bother PCMedia about this before offer/answer concludes.
+    // Once offer/answer concludes, PCMedia will extract these candidates from
+    // the remote SDP.
+    if (mSignalingState == PCImplSignalingState::SignalingStable) {
+      mMedia->AddIceCandidate(aCandidate, aMid, aLevel);
+    }
+    pco->OnAddIceCandidateSuccess(rv);
+  } else {
+    ++mAddCandidateErrorCount;
+    Error error;
+    switch (res) {
+      case NS_ERROR_UNEXPECTED:
+        error = kInvalidState;
+        break;
+      case NS_ERROR_INVALID_ARG:
+        error = kInvalidCandidate;
+        break;
+      default:
+        error = kInternalError;
+    }
+
+    std::string errorString = mJsepSession->GetLastError();
+
+    CSFLogError(logTag, "Failed to incorporate remote candidate into SDP:"
+                        " res = %u, candidate = %s, level = %u, error = %s",
+                        static_cast<unsigned>(res),
+                        aCandidate,
+                        static_cast<unsigned>(aLevel),
+                        errorString.c_str());
+
+    pco->OnAddIceCandidateError(error, ObString(errorString.c_str()), rv);
+  }
+
+  UpdateSignalingState();
+  return NS_OK;
+}
+
// Legacy entry point. Actual stream teardown happens via Close()/CloseInt(),
// so this is intentionally a no-op beyond the API-state check.
NS_IMETHODIMP
PeerConnectionImpl::CloseStreams() {
  PC_AUTO_ENTER_API_CALL(false);

  return NS_OK;
}
+
#if !defined(MOZILLA_EXTERNAL_LINKAGE)
// Pins the peer identity for this connection. The identity is write-once:
// setting the same value again succeeds, any different value fails. On first
// set, all local sinks are updated with the new identity constraint.
nsresult
PeerConnectionImpl::SetPeerIdentity(const nsAString& aPeerIdentity)
{
  PC_AUTO_ENTER_API_CALL(true);
  MOZ_ASSERT(!aPeerIdentity.IsEmpty());

  // once set, this can't be changed
  if (mPeerIdentity) {
    if (!mPeerIdentity->Equals(aPeerIdentity)) {
      return NS_ERROR_FAILURE;
    }
  } else {
    mPeerIdentity = new PeerIdentity(aPeerIdentity);
    nsIDocument* doc = GetWindow()->GetExtantDoc();
    if (!doc) {
      CSFLogInfo(logTag, "Can't update principal on streams; document gone");
      return NS_ERROR_FAILURE;
    }
    // nullptr track = apply to all local tracks.
    MediaStreamTrack* allTracks = nullptr;
    mMedia->UpdateSinkIdentity_m(allTracks, doc->NodePrincipal(), mPeerIdentity);
  }
  return NS_OK;
}
#endif
+
// Records that the DTLS transport has connected at least once, latching
// |mPrivacyRequested| if the peer asked for privacy. If privacy is known to
// be unnecessary, remote stream principals are relaxed accordingly.
nsresult
PeerConnectionImpl::SetDtlsConnected(bool aPrivacyRequested)
{
  PC_AUTO_ENTER_API_CALL(false);

  // For this, as with mPrivacyRequested, once we've connected to a peer, we
  // fixate on that peer. Dealing with multiple peers or connections is more
  // than this run-down wreck of an object can handle.
  // Besides, this is only used to say if we have been connected ever.
#if !defined(MOZILLA_EXTERNAL_LINKAGE)
  if (!mPrivacyRequested && !aPrivacyRequested && !mDtlsConnected) {
    // now we know that privacy isn't needed for sure
    nsIDocument* doc = GetWindow()->GetExtantDoc();
    if (!doc) {
      CSFLogInfo(logTag, "Can't update principal on streams; document gone");
      return NS_ERROR_FAILURE;
    }
    mMedia->UpdateRemoteStreamPrincipals_m(doc->NodePrincipal());
  }
#endif
  mDtlsConnected = true;
  // Privacy, once requested, is sticky for the connection's lifetime.
  mPrivacyRequested = mPrivacyRequested || aPrivacyRequested;
  return NS_OK;
}
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+void
+PeerConnectionImpl::PrincipalChanged(MediaStreamTrack* aTrack) {
+ nsIDocument* doc = GetWindow()->GetExtantDoc();
+ if (doc) {
+ mMedia->UpdateSinkIdentity_m(aTrack, doc->NodePrincipal(), mPeerIdentity);
+ } else {
+ CSFLogInfo(logTag, "Can't update sink principal; document gone");
+ }
+}
+#endif
+
// Returns the track's id as a UTF-8 std::string. The two branches differ
// only in the string type the underlying GetId() deals in (UTF-16 nsString
// in-tree, std::string for external linkage).
std::string
PeerConnectionImpl::GetTrackId(const MediaStreamTrack& aTrack)
{
#if !defined(MOZILLA_EXTERNAL_LINKAGE)
  nsString wideTrackId;
  aTrack.GetId(wideTrackId);
  return NS_ConvertUTF16toUTF8(wideTrackId).get();
#else
  return aTrack.GetId();
#endif
}
+
// Returns the stream's id as a UTF-8 std::string; mirror of GetTrackId().
std::string
PeerConnectionImpl::GetStreamId(const DOMMediaStream& aStream)
{
#if !defined(MOZILLA_EXTERNAL_LINKAGE)
  nsString wideStreamId;
  aStream.GetId(wideStreamId);
  return NS_ConvertUTF16toUTF8(wideStreamId).get();
#else
  return aStream.GetId();
#endif
}
+
// Logs a media-layer error. Content is not currently notified.
void
PeerConnectionImpl::OnMediaError(const std::string& aError)
{
  CSFLogError(logTag, "Encountered media error! %s", aError.c_str());
  // TODO: Let content know about this somehow.
}
+
+nsresult
+PeerConnectionImpl::AddTrack(MediaStreamTrack& aTrack,
+ const Sequence<OwningNonNull<DOMMediaStream>>& aStreams)
+{
+ PC_AUTO_ENTER_API_CALL(true);
+
+ if (!aStreams.Length()) {
+ CSFLogError(logTag, "%s: At least one stream arg required", __FUNCTION__);
+ return NS_ERROR_FAILURE;
+ }
+
+ return AddTrack(aTrack, aStreams[0]);
+}
+
// Adds |aTrack| (belonging to |aMediaStream|) to the connection: registers
// it with PCMedia, starts observing its principal, adds it to the JSEP
// session by kind, and flags that renegotiation is needed.
nsresult
PeerConnectionImpl::AddTrack(MediaStreamTrack& aTrack,
                             DOMMediaStream& aMediaStream)
{
  std::string streamId = PeerConnectionImpl::GetStreamId(aMediaStream);
  std::string trackId = PeerConnectionImpl::GetTrackId(aTrack);
  nsresult res = mMedia->AddTrack(aMediaStream, streamId, aTrack, trackId);
  if (NS_FAILED(res)) {
    return res;
  }

  CSFLogDebug(logTag, "Added track (%s) to stream %s",
                      trackId.c_str(), streamId.c_str());

  // Track principal changes so sinks can be kept up to date.
  aTrack.AddPrincipalChangeObserver(this);
#if !defined(MOZILLA_EXTERNAL_LINKAGE)
  PrincipalChanged(&aTrack);
#endif

  if (aTrack.AsAudioStreamTrack()) {
    res = AddTrackToJsepSession(SdpMediaSection::kAudio, streamId, trackId);
    if (NS_FAILED(res)) {
      return res;
    }
    mNumAudioStreams++;
  }

  if (aTrack.AsVideoStreamTrack()) {
#if !defined(MOZILLA_EXTERNAL_LINKAGE)
    if (!Preferences::GetBool("media.peerconnection.video.enabled", true)) {
      // Before this code was moved, this would silently ignore just like it
      // does now. Is this actually what we want to do?
      return NS_OK;
    }
#endif

    res = AddTrackToJsepSession(SdpMediaSection::kVideo, streamId, trackId);
    if (NS_FAILED(res)) {
      return res;
    }
    mNumVideoStreams++;
  }
  // New track => offer/answer must run again.
  OnNegotiationNeeded();
  return NS_OK;
}
+
+nsresult
+PeerConnectionImpl::SelectSsrc(MediaStreamTrack& aRecvTrack,
+ unsigned short aSsrcIndex)
+{
+ for (size_t i = 0; i < mMedia->RemoteStreamsLength(); ++i) {
+ if (mMedia->GetRemoteStreamByIndex(i)->GetMediaStream()->
+ HasTrack(aRecvTrack)) {
+ auto& pipelines = mMedia->GetRemoteStreamByIndex(i)->GetPipelines();
+ std::string trackId = PeerConnectionImpl::GetTrackId(aRecvTrack);
+ auto it = pipelines.find(trackId);
+ if (it != pipelines.end()) {
+ it->second->SelectSsrc_m(aSsrcIndex);
+ }
+ }
+ }
+ return NS_OK;
+}
+
// Removes |aTrack| from the connection: cancels any in-flight DTMF for it,
// removes it from the JSEP session and PCMedia, stops observing its
// principal, and flags renegotiation.
NS_IMETHODIMP
PeerConnectionImpl::RemoveTrack(MediaStreamTrack& aTrack) {
  PC_AUTO_ENTER_API_CALL(true);

  std::string trackId = PeerConnectionImpl::GetTrackId(aTrack);

#if !defined(MOZILLA_EXTERNAL_LINKAGE)
  // Drop any DTMF state keyed to this track (at most one entry).
  nsString wideTrackId;
  aTrack.GetId(wideTrackId);
  for (size_t i = 0; i < mDTMFStates.Length(); ++i) {
    if (mDTMFStates[i]->mTrackId == wideTrackId) {
      mDTMFStates[i]->mSendTimer->Cancel();
      mDTMFStates.RemoveElementAt(i);
      break;
    }
  }
#endif

  RefPtr<LocalSourceStreamInfo> info = media()->GetLocalStreamByTrackId(trackId);

  if (!info) {
    CSFLogError(logTag, "%s: Unknown stream", __FUNCTION__);
    return NS_ERROR_INVALID_ARG;
  }

  nsresult rv =
    mJsepSession->RemoveTrack(info->GetId(), trackId);

  if (NS_FAILED(rv)) {
    CSFLogError(logTag, "%s: Unknown stream/track ids %s %s",
                __FUNCTION__,
                info->GetId().c_str(),
                trackId.c_str());
    return rv;
  }

  media()->RemoveLocalTrack(info->GetId(), trackId);

  aTrack.RemovePrincipalChangeObserver(this);

  OnNegotiationNeeded();

  return NS_OK;
}
+
+static int GetDTMFToneCode(uint16_t c)
+{
+ const char* DTMF_TONECODES = "0123456789*#ABCD";
+
+ if (c == ',') {
+ // , is a special character indicating a 2 second delay
+ return -1;
+ }
+
+ const char* i = strchr(DTMF_TONECODES, c);
+ MOZ_ASSERT(i);
+ return i - DTMF_TONECODES;
+}
+
// Queues DTMF tones for the given sender's track. Creates (or reuses) the
// per-track DTMFState, resolves the m-line level from the negotiated track
// pairs (honoring bundle), and arms the one-shot send timer. Replacing
// |mTones| while a previous sequence is playing follows the webrtc
// insertDTMF semantics of overwriting the tone buffer.
NS_IMETHODIMP
PeerConnectionImpl::InsertDTMF(mozilla::dom::RTCRtpSender& sender,
                               const nsAString& tones, uint32_t duration,
                               uint32_t interToneGap) {
#if !defined(MOZILLA_EXTERNAL_LINKAGE)
  PC_AUTO_ENTER_API_CALL(false);

  // Check values passed in from PeerConnection.js
  MOZ_ASSERT(duration >= 40, "duration must be at least 40");
  MOZ_ASSERT(duration <= 6000, "duration must be at most 6000");
  MOZ_ASSERT(interToneGap >= 30, "interToneGap must be at least 30");

  JSErrorResult jrv;

  // Retrieve track
  RefPtr<MediaStreamTrack> mst = sender.GetTrack(jrv);
  if (jrv.Failed()) {
    NS_WARNING("Failed to retrieve track for RTCRtpSender!");
    return jrv.StealNSResult();
  }

  nsString senderTrackId;
  mst->GetId(senderTrackId);

  // Attempt to locate state for the DTMFSender
  RefPtr<DTMFState> state;
  for (auto& dtmfState : mDTMFStates) {
    if (dtmfState->mTrackId == senderTrackId) {
      state = dtmfState;
      break;
    }
  }

  // No state yet, create a new one
  if (!state) {
    state = *mDTMFStates.AppendElement(new DTMFState);
    state->mPeerConnectionImpl = this;
    state->mTrackId = senderTrackId;
    state->mSendTimer = do_CreateInstance(NS_TIMER_CONTRACTID);
    MOZ_ASSERT(state->mSendTimer);
  }
  MOZ_ASSERT(state);

  // Resolve which m-line carries this track: the bundle level wins if the
  // pair is bundled, otherwise the pair's own level. -1 = not found.
  auto trackPairs = mJsepSession->GetNegotiatedTrackPairs();
  state->mLevel = -1;
  for (auto& trackPair : trackPairs) {
    if (state->mTrackId.EqualsASCII(trackPair.mSending->GetTrackId().c_str())) {
      if (trackPair.mBundleLevel.isSome()) {
        state->mLevel = *trackPair.mBundleLevel;
      } else {
        state->mLevel = trackPair.mLevel;
      }
      break;
    }
  }

  state->mTones = tones;
  state->mDuration = duration;
  state->mInterToneGap = interToneGap;
  if (!state->mTones.IsEmpty()) {
    // Fire immediately; the timer callback consumes tones one at a time.
    state->mSendTimer->InitWithCallback(state, 0, nsITimer::TYPE_ONE_SHOT);
  }
#endif
  return NS_OK;
}
+
+NS_IMETHODIMP
+PeerConnectionImpl::GetDTMFToneBuffer(mozilla::dom::RTCRtpSender& sender,
+ nsAString& outToneBuffer) {
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+ PC_AUTO_ENTER_API_CALL(false);
+
+ JSErrorResult jrv;
+
+ // Retrieve track
+ RefPtr<MediaStreamTrack> mst = sender.GetTrack(jrv);
+ if (jrv.Failed()) {
+ NS_WARNING("Failed to retrieve track for RTCRtpSender!");
+ return jrv.StealNSResult();
+ }
+
+ nsString senderTrackId;
+ mst->GetId(senderTrackId);
+
+ // Attempt to locate state for the DTMFSender
+ for (auto& dtmfState : mDTMFStates) {
+ if (dtmfState->mTrackId == senderTrackId) {
+ outToneBuffer = dtmfState->mTones;
+ break;
+ }
+ }
+#endif
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+PeerConnectionImpl::ReplaceTrack(MediaStreamTrack& aThisTrack,
+ MediaStreamTrack& aWithTrack) {
+ PC_AUTO_ENTER_API_CALL(true);
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+ nsString trackId;
+ aThisTrack.GetId(trackId);
+
+ for (size_t i = 0; i < mDTMFStates.Length(); ++i) {
+ if (mDTMFStates[i]->mTrackId == trackId) {
+ mDTMFStates[i]->mSendTimer->Cancel();
+ mDTMFStates.RemoveElementAt(i);
+ break;
+ }
+ }
+#endif
+
+ RefPtr<PeerConnectionObserver> pco = do_QueryObjectReferent(mPCObserver);
+ if (!pco) {
+ return NS_ERROR_UNEXPECTED;
+ }
+ JSErrorResult jrv;
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+ if (&aThisTrack == &aWithTrack) {
+ pco->OnReplaceTrackSuccess(jrv);
+ if (jrv.Failed()) {
+ CSFLogError(logTag, "Error firing replaceTrack success callback");
+ return NS_ERROR_UNEXPECTED;
+ }
+ return NS_OK;
+ }
+
+ nsString thisKind;
+ aThisTrack.GetKind(thisKind);
+ nsString withKind;
+ aWithTrack.GetKind(withKind);
+
+ if (thisKind != withKind) {
+ pco->OnReplaceTrackError(kIncompatibleMediaStreamTrack,
+ ObString(mJsepSession->GetLastError().c_str()),
+ jrv);
+ if (jrv.Failed()) {
+ CSFLogError(logTag, "Error firing replaceTrack success callback");
+ return NS_ERROR_UNEXPECTED;
+ }
+ return NS_OK;
+ }
+#endif
+ std::string origTrackId = PeerConnectionImpl::GetTrackId(aThisTrack);
+ std::string newTrackId = PeerConnectionImpl::GetTrackId(aWithTrack);
+
+ RefPtr<LocalSourceStreamInfo> info =
+ media()->GetLocalStreamByTrackId(origTrackId);
+ if (!info) {
+ CSFLogError(logTag, "Could not find stream from trackId");
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ std::string origStreamId = info->GetId();
+ std::string newStreamId =
+ PeerConnectionImpl::GetStreamId(*aWithTrack.mOwningStream);
+
+ nsresult rv = mJsepSession->ReplaceTrack(origStreamId,
+ origTrackId,
+ newStreamId,
+ newTrackId);
+ if (NS_FAILED(rv)) {
+ pco->OnReplaceTrackError(kInvalidMediastreamTrack,
+ ObString(mJsepSession->GetLastError().c_str()),
+ jrv);
+ if (jrv.Failed()) {
+ CSFLogError(logTag, "Error firing replaceTrack error callback");
+ return NS_ERROR_UNEXPECTED;
+ }
+ return NS_OK;
+ }
+
+ rv = media()->ReplaceTrack(origStreamId,
+ origTrackId,
+ aWithTrack,
+ newStreamId,
+ newTrackId);
+
+ if (NS_FAILED(rv)) {
+ CSFLogError(logTag, "Unexpected error in ReplaceTrack: %d",
+ static_cast<int>(rv));
+ pco->OnReplaceTrackError(kInvalidMediastreamTrack,
+ ObString("Failed to replace track"),
+ jrv);
+ if (jrv.Failed()) {
+ CSFLogError(logTag, "Error firing replaceTrack error callback");
+ return NS_ERROR_UNEXPECTED;
+ }
+ return NS_OK;
+ }
+ aThisTrack.RemovePrincipalChangeObserver(this);
+ aWithTrack.AddPrincipalChangeObserver(this);
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+ PrincipalChanged(&aWithTrack);
+#endif
+
+ // We update the media pipelines here so we can apply different codec
+ // settings for different sources (e.g. screensharing as opposed to camera.)
+ // TODO: We should probably only do this if the source has in fact changed.
+ mMedia->UpdateMediaPipelines(*mJsepSession);
+
+ pco->OnReplaceTrackSuccess(jrv);
+ if (jrv.Failed()) {
+ CSFLogError(logTag, "Error firing replaceTrack success callback");
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ return NS_OK;
+}
+
#if !defined(MOZILLA_EXTERNAL_LINKAGE)
// WebIDL setParameters(): converts RTCRtpParameters encodings into
// JSEP-level JsConstraints and hands them to the internal overload.
NS_IMETHODIMP
PeerConnectionImpl::SetParameters(MediaStreamTrack& aTrack,
                                  const RTCRtpParameters& aParameters) {
  PC_AUTO_ENTER_API_CALL(true);

  std::vector<JsepTrack::JsConstraints> constraints;
  if (aParameters.mEncodings.WasPassed()) {
    for (auto& encoding : aParameters.mEncodings.Value()) {
      JsepTrack::JsConstraints constraint;
      if (encoding.mRid.WasPassed()) {
        // rid is ASCII per spec; narrow to UTF-8 for the JSEP layer.
        constraint.rid = NS_ConvertUTF16toUTF8(encoding.mRid.Value()).get();
      }
      if (encoding.mMaxBitrate.WasPassed()) {
        constraint.constraints.maxBr = encoding.mMaxBitrate.Value();
      }
      constraint.constraints.scaleDownBy = encoding.mScaleResolutionDownBy;
      constraints.push_back(constraint);
    }
  }
  return SetParameters(aTrack, constraints);
}
#endif
+
+nsresult
+PeerConnectionImpl::SetParameters(
+ MediaStreamTrack& aTrack,
+ const std::vector<JsepTrack::JsConstraints>& aConstraints)
+{
+ std::string trackId = PeerConnectionImpl::GetTrackId(aTrack);
+ RefPtr<LocalSourceStreamInfo> info = media()->GetLocalStreamByTrackId(trackId);
+ if (!info) {
+ CSFLogError(logTag, "%s: Unknown stream", __FUNCTION__);
+ return NS_ERROR_INVALID_ARG;
+ }
+ std::string streamId = info->GetId();
+
+ return mJsepSession->SetParameters(streamId, trackId, aConstraints);
+}
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+NS_IMETHODIMP
+PeerConnectionImpl::GetParameters(MediaStreamTrack& aTrack,
+ RTCRtpParameters& aOutParameters) {
+ PC_AUTO_ENTER_API_CALL(true);
+
+ std::vector<JsepTrack::JsConstraints> constraints;
+ nsresult rv = GetParameters(aTrack, &constraints);
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+ aOutParameters.mEncodings.Construct();
+ for (auto& constraint : constraints) {
+ RTCRtpEncodingParameters encoding;
+ encoding.mRid.Construct(NS_ConvertASCIItoUTF16(constraint.rid.c_str()));
+ encoding.mMaxBitrate.Construct(constraint.constraints.maxBr);
+ encoding.mScaleResolutionDownBy = constraint.constraints.scaleDownBy;
+ aOutParameters.mEncodings.Value().AppendElement(Move(encoding), fallible);
+ }
+ return NS_OK;
+}
+#endif
+
+nsresult
+PeerConnectionImpl::GetParameters(
+ MediaStreamTrack& aTrack,
+ std::vector<JsepTrack::JsConstraints>* aOutConstraints)
+{
+ std::string trackId = PeerConnectionImpl::GetTrackId(aTrack);
+ RefPtr<LocalSourceStreamInfo> info = media()->GetLocalStreamByTrackId(trackId);
+ if (!info) {
+ CSFLogError(logTag, "%s: Unknown stream", __FUNCTION__);
+ return NS_ERROR_INVALID_ARG;
+ }
+ std::string streamId = info->GetId();
+
+ return mJsepSession->GetParameters(streamId, trackId, aOutConstraints);
+}
+
// Computes the DTLS certificate fingerprint using |algorithm| (e.g.
// "sha-256") and writes the raw digest bytes into |fingerprint|.
nsresult
PeerConnectionImpl::CalculateFingerprint(
    const std::string& algorithm,
    std::vector<uint8_t>* fingerprint) const {
  uint8_t buf[DtlsIdentity::HASH_ALGORITHM_MAX_LENGTH];
  size_t len = 0;

  MOZ_ASSERT(fingerprint);
  // The certificate lives in a different object depending on linkage.
#if !defined(MOZILLA_EXTERNAL_LINKAGE)
  const UniqueCERTCertificate& cert = mCertificate->Certificate();
#else
  const UniqueCERTCertificate& cert = mIdentity->cert();
#endif
  nsresult rv = DtlsIdentity::ComputeFingerprint(cert, algorithm,
                                                 &buf[0], sizeof(buf),
                                                 &len);
  if (NS_FAILED(rv)) {
    CSFLogError(logTag, "Unable to calculate certificate fingerprint, rv=%u",
                        static_cast<unsigned>(rv));
    return rv;
  }
  MOZ_ASSERT(len > 0 && len <= DtlsIdentity::HASH_ALGORITHM_MAX_LENGTH);
  fingerprint->assign(buf, buf + len);
  return NS_OK;
}
+
+NS_IMETHODIMP
+PeerConnectionImpl::GetFingerprint(char** fingerprint)
+{
+ MOZ_ASSERT(fingerprint);
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+ MOZ_ASSERT(mCertificate);
+#endif
+ std::vector<uint8_t> fp;
+ nsresult rv = CalculateFingerprint(DtlsIdentity::DEFAULT_HASH_ALGORITHM, &fp);
+ NS_ENSURE_SUCCESS(rv, rv);
+ std::ostringstream os;
+ os << DtlsIdentity::DEFAULT_HASH_ALGORITHM << ' '
+ << SdpFingerprintAttributeList::FormatFingerprint(fp);
+ std::string fpStr = os.str();
+
+ char* tmp = new char[fpStr.size() + 1];
+ std::copy(fpStr.begin(), fpStr.end(), tmp);
+ tmp[fpStr.size()] = '\0';
+
+ *fingerprint = tmp;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+PeerConnectionImpl::GetLocalDescription(char** aSDP)
+{
+ PC_AUTO_ENTER_API_CALL_NO_CHECK();
+ MOZ_ASSERT(aSDP);
+ std::string localSdp = mJsepSession->GetLocalDescription();
+
+ char* tmp = new char[localSdp.size() + 1];
+ std::copy(localSdp.begin(), localSdp.end(), tmp);
+ tmp[localSdp.size()] = '\0';
+
+ *aSDP = tmp;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+PeerConnectionImpl::GetRemoteDescription(char** aSDP)
+{
+ PC_AUTO_ENTER_API_CALL_NO_CHECK();
+ MOZ_ASSERT(aSDP);
+ std::string remoteSdp = mJsepSession->GetRemoteDescription();
+
+ char* tmp = new char[remoteSdp.size() + 1];
+ std::copy(remoteSdp.begin(), remoteSdp.end(), tmp);
+ tmp[remoteSdp.size()] = '\0';
+
+ *aSDP = tmp;
+ return NS_OK;
+}
+
// Getter for the current DOM-visible signaling state.
NS_IMETHODIMP
PeerConnectionImpl::SignalingState(PCImplSignalingState* aState)
{
  PC_AUTO_ENTER_API_CALL_NO_CHECK();
  MOZ_ASSERT(aState);

  *aState = mSignalingState;
  return NS_OK;
}
+
// Getter for the current DOM-visible ICE connection state.
NS_IMETHODIMP
PeerConnectionImpl::IceConnectionState(PCImplIceConnectionState* aState)
{
  PC_AUTO_ENTER_API_CALL_NO_CHECK();
  MOZ_ASSERT(aState);

  *aState = mIceConnectionState;
  return NS_OK;
}
+
// Getter for the current DOM-visible ICE gathering state.
NS_IMETHODIMP
PeerConnectionImpl::IceGatheringState(PCImplIceGatheringState* aState)
{
  PC_AUTO_ENTER_API_CALL_NO_CHECK();
  MOZ_ASSERT(aState);

  *aState = mIceGatheringState;
  return NS_OK;
}
+
// Common precondition check for API entry points: the PC must not be closed
// and the media object must still be alive. |assert_ice_ready| additionally
// (debug-only) asserts that gathering finished when trickle is disabled.
nsresult
PeerConnectionImpl::CheckApiState(bool assert_ice_ready) const
{
  PC_AUTO_ENTER_API_CALL_NO_CHECK();
  MOZ_ASSERT(mTrickle || !assert_ice_ready ||
             (mIceGatheringState == PCImplIceGatheringState::Complete));

  if (IsClosed()) {
    CSFLogError(logTag, "%s: called API while closed", __FUNCTION__);
    return NS_ERROR_FAILURE;
  }
  if (!mMedia) {
    CSFLogError(logTag, "%s: called API with disposed mMedia", __FUNCTION__);
    return NS_ERROR_FAILURE;
  }
  return NS_OK;
}
+
// Public close entry point. Transitioning the signaling state to
// SignalingClosed drives the real teardown (SetSignalingState_m -> CloseInt).
NS_IMETHODIMP
PeerConnectionImpl::Close()
{
  CSFLogDebug(logTag, "%s: for %s", __FUNCTION__, mHandle.c_str());
  PC_AUTO_ENTER_API_CALL_NO_CHECK();

  SetSignalingState_m(PCImplSignalingState::SignalingClosed);

  return NS_OK;
}
+
// Called when a GMP plugin crashes. Returns true (and dispatches a
// chrome-only PluginCrashed DOM event) iff the crashed plugin was in use by
// one of this connection's codecs.
bool
PeerConnectionImpl::PluginCrash(uint32_t aPluginID,
                                const nsAString& aPluginName)
{
  // fire an event to the DOM window if this is "ours"
  bool result = mMedia ? mMedia->AnyCodecHasPluginID(aPluginID) : false;
  if (!result) {
    return false;
  }

  CSFLogError(logTag, "%s: Our plugin %llu crashed", __FUNCTION__, static_cast<unsigned long long>(aPluginID));

#if !defined(MOZILLA_EXTERNAL_LINKAGE)
  nsCOMPtr<nsIDocument> doc = mWindow->GetExtantDoc();
  if (!doc) {
    NS_WARNING("Couldn't get document for PluginCrashed event!");
    // Still "ours" even though we couldn't notify.
    return true;
  }

  PluginCrashedEventInit init;
  init.mPluginID = aPluginID;
  init.mPluginName = aPluginName;
  init.mSubmittedCrashReport = false;
  init.mGmpPlugin = true;
  init.mBubbles = true;
  init.mCancelable = true;

  RefPtr<PluginCrashedEvent> event =
    PluginCrashedEvent::Constructor(doc, NS_LITERAL_STRING("PluginCrashed"), init);

  event->SetTrusted(true);
  // Only chrome should see this; content gets the generic media error path.
  event->WidgetEventPtr()->mFlags.mOnlyChromeDispatch = true;

  EventDispatcher::DispatchDOMEvent(mWindow, nullptr, event, nullptr, nullptr);
#endif

  return true;
}
+
// Reports end-of-call Telemetry: renegotiation count, per-kind max track
// counts, and a bitmask describing which media types the call used.
void
PeerConnectionImpl::RecordEndOfCallTelemetry() const
{
  if (!mJsepSession) {
    return;
  }

#if !defined(MOZILLA_EXTERNAL_LINKAGE)
  // Bitmask used for WEBRTC/LOOP_CALL_TYPE telemetry reporting
  static const uint32_t kAudioTypeMask = 1;
  static const uint32_t kVideoTypeMask = 2;
  static const uint32_t kDataChannelTypeMask = 4;

  // Report end-of-call Telemetry
  if (mJsepSession->GetNegotiations() > 0) {
    // First negotiation is the call setup; only re-negotiations count.
    Telemetry::Accumulate(Telemetry::WEBRTC_RENEGOTIATIONS,
                          mJsepSession->GetNegotiations()-1);
  }
  Telemetry::Accumulate(Telemetry::WEBRTC_MAX_VIDEO_SEND_TRACK,
                        mMaxSending[SdpMediaSection::MediaType::kVideo]);
  Telemetry::Accumulate(Telemetry::WEBRTC_MAX_VIDEO_RECEIVE_TRACK,
                        mMaxReceiving[SdpMediaSection::MediaType::kVideo]);
  Telemetry::Accumulate(Telemetry::WEBRTC_MAX_AUDIO_SEND_TRACK,
                        mMaxSending[SdpMediaSection::MediaType::kAudio]);
  Telemetry::Accumulate(Telemetry::WEBRTC_MAX_AUDIO_RECEIVE_TRACK,
                        mMaxReceiving[SdpMediaSection::MediaType::kAudio]);
  // DataChannels appear in both Sending and Receiving
  Telemetry::Accumulate(Telemetry::WEBRTC_DATACHANNEL_NEGOTIATED,
                        mMaxSending[SdpMediaSection::MediaType::kApplication]);
  // Enumerated/bitmask: 1 = Audio, 2 = Video, 4 = DataChannel
  // A/V = 3, A/V/D = 7, etc
  uint32_t type = 0;
  if (mMaxSending[SdpMediaSection::MediaType::kAudio] ||
      mMaxReceiving[SdpMediaSection::MediaType::kAudio]) {
    type = kAudioTypeMask;
  }
  if (mMaxSending[SdpMediaSection::MediaType::kVideo] ||
      mMaxReceiving[SdpMediaSection::MediaType::kVideo]) {
    type |= kVideoTypeMask;
  }
  if (mMaxSending[SdpMediaSection::MediaType::kApplication]) {
    type |= kDataChannelTypeMask;
  }
  Telemetry::Accumulate(Telemetry::WEBRTC_CALL_TYPE,
                        type);
#endif
}
+
// Internal teardown: cancels DTMF timers, records final stats/telemetry,
// closes the JSEP session and the data channel, then shuts down media.
// Order matters here; see the inline comments.
nsresult
PeerConnectionImpl::CloseInt()
{
  PC_AUTO_ENTER_API_CALL_NO_CHECK();

  for (auto& dtmfState : mDTMFStates) {
    dtmfState->mSendTimer->Cancel();
  }

  // We do this at the end of the call because we want to make sure we've waited
  // for all trickle ICE candidates to come in; this can happen well after we've
  // transitioned to connected. As a bonus, this allows us to detect race
  // conditions where a stats dispatch happens right as the PC closes.
  if (!mPrivateWindow) {
    RecordLongtermICEStatistics();
  }
  RecordEndOfCallTelemetry();
  CSFLogInfo(logTag, "%s: Closing PeerConnectionImpl %s; "
             "ending call", __FUNCTION__, mHandle.c_str());
  if (mJsepSession) {
    mJsepSession->Close();
  }
#if !defined(MOZILLA_EXTERNAL_LINKAGE)
  if (mDataConnection) {
    CSFLogInfo(logTag, "%s: Destroying DataChannelConnection %p for %s",
               __FUNCTION__, (void *) mDataConnection.get(), mHandle.c_str());
    mDataConnection->Destroy();
    mDataConnection = nullptr; // it may not go away until the runnables are dead
  }
#endif
  ShutdownMedia();

  // DataConnection will need to stay alive until all threads/runnables exit

  return NS_OK;
}
+
// Detaches from local tracks, records call-duration telemetry, and hands
// the media object its own reference so it can self-destruct safely on the
// right thread.
void
PeerConnectionImpl::ShutdownMedia()
{
  PC_AUTO_ENTER_API_CALL_NO_CHECK();

  if (!mMedia)
    return;

#if !defined(MOZILLA_EXTERNAL_LINKAGE)
  // before we destroy references to local tracks, detach from them
  for(uint32_t i = 0; i < media()->LocalStreamsLength(); ++i) {
    LocalSourceStreamInfo *info = media()->GetLocalStreamByIndex(i);
    for (const auto& pair : info->GetMediaStreamTracks()) {
      pair.second->RemovePrincipalChangeObserver(this);
    }
  }

  // End of call to be recorded in Telemetry
  if (!mStartTime.IsNull()){
    TimeDuration timeDelta = TimeStamp::Now() - mStartTime;
    Telemetry::Accumulate(Telemetry::WEBRTC_CALL_DURATION,
                          timeDelta.ToSeconds());
  }
#endif

  // Forget the reference so that we can transfer it to
  // SelfDestruct(). (The raw pointer taken here is owned by SelfDestruct.)
  mMedia.forget().take()->SelfDestruct();
}
+
// Main-thread signaling state machine transition. Depending on the new
// state this may create transports, commit or roll back an ICE restart,
// (re)start ICE checks, update media pipelines, tear the PC down, and/or
// fire negotiationneeded. |rollback| marks a transition caused by an SDP
// rollback rather than a completed offer/answer.
void
PeerConnectionImpl::SetSignalingState_m(PCImplSignalingState aSignalingState,
                                        bool rollback)
{
  PC_AUTO_ENTER_API_CALL_NO_CHECK();
  // No-op transitions, and anything after close, are ignored.
  if (mSignalingState == aSignalingState ||
      mSignalingState == PCImplSignalingState::SignalingClosed) {
    return;
  }

  // Transports must exist before ICE/DTLS can run: create them when we
  // produce a local offer, or when an answer completes remote-offer flow.
  if (aSignalingState == PCImplSignalingState::SignalingHaveLocalOffer ||
      (aSignalingState == PCImplSignalingState::SignalingStable &&
       mSignalingState == PCImplSignalingState::SignalingHaveRemoteOffer &&
       !rollback)) {
    mMedia->EnsureTransports(*mJsepSession);
  }

  mSignalingState = aSignalingState;

  bool fireNegotiationNeeded = false;
  if (mSignalingState == PCImplSignalingState::SignalingStable) {
    // A provisional ICE restart either commits (negotiation finished) or
    // rolls back.
    if (mMedia->GetIceRestartState() ==
            PeerConnectionMedia::ICE_RESTART_PROVISIONAL) {
      if (rollback) {
        RollbackIceRestart();
      } else {
        mMedia->CommitIceRestart();
      }
    }

    // Either negotiation is done, or we've rolled back. In either case, we
    // need to re-evaluate whether further negotiation is required.
    mNegotiationNeeded = false;
    // If we're rolling back a local offer, we might need to remove some
    // transports, but nothing further needs to be done.
    mMedia->ActivateOrRemoveTransports(*mJsepSession);
    if (!rollback) {
      mMedia->UpdateMediaPipelines(*mJsepSession);
      InitializeDataChannel();
      mMedia->StartIceChecks(*mJsepSession);
    }

    if (!mJsepSession->AllLocalTracksAreAssigned()) {
      CSFLogInfo(logTag, "Not all local tracks were assigned to an "
                 "m-section, either because the offerer did not offer"
                 " to receive enough tracks, or because tracks were "
                 "added after CreateOffer/Answer, but before "
                 "offer/answer completed. This requires "
                 "renegotiation.");
      fireNegotiationNeeded = true;
    }

    // Telemetry: record info on the current state of streams/renegotiations/etc
    // Note: this code gets run on rollbacks as well!

    // Update the max channels used with each direction for each type
    uint16_t receiving[SdpMediaSection::kMediaTypes];
    uint16_t sending[SdpMediaSection::kMediaTypes];
    mJsepSession->CountTracks(receiving, sending);
    for (size_t i = 0; i < SdpMediaSection::kMediaTypes; i++) {
      if (mMaxReceiving[i] < receiving[i]) {
        mMaxReceiving[i] = receiving[i];
      }
      if (mMaxSending[i] < sending[i]) {
        mMaxSending[i] = sending[i];
      }
    }
  }

  if (mSignalingState == PCImplSignalingState::SignalingClosed) {
    CloseInt();
  }

  RefPtr<PeerConnectionObserver> pco = do_QueryObjectReferent(mPCObserver);
  if (!pco) {
    return;
  }
  JSErrorResult rv;
  pco->OnStateChange(PCObserverStateType::SignalingState, rv);

  if (fireNegotiationNeeded) {
    // We don't use MaybeFireNegotiationNeeded here, since content might have
    // already caused a transition from stable.
    OnNegotiationNeeded();
  }
}
+
// Maps the JSEP session's state onto the DOM signaling-state enum and runs
// the state-machine transition. MOZ_CRASH on an unknown JSEP state.
void
PeerConnectionImpl::UpdateSignalingState(bool rollback) {
  mozilla::JsepSignalingState state =
      mJsepSession->GetState();

  PCImplSignalingState newState;

  switch(state) {
    case kJsepStateStable:
      newState = PCImplSignalingState::SignalingStable;
      break;
    case kJsepStateHaveLocalOffer:
      newState = PCImplSignalingState::SignalingHaveLocalOffer;
      break;
    case kJsepStateHaveRemoteOffer:
      newState = PCImplSignalingState::SignalingHaveRemoteOffer;
      break;
    case kJsepStateHaveLocalPranswer:
      newState = PCImplSignalingState::SignalingHaveLocalPranswer;
      break;
    case kJsepStateHaveRemotePranswer:
      newState = PCImplSignalingState::SignalingHaveRemotePranswer;
      break;
    case kJsepStateClosed:
      newState = PCImplSignalingState::SignalingClosed;
      break;
    default:
      MOZ_CRASH();
  }

  SetSignalingState_m(newState, rollback);
}
+
// True once the signaling state has reached SignalingClosed.
bool
PeerConnectionImpl::IsClosed() const
{
  return mSignalingState == PCImplSignalingState::SignalingClosed;
}
+
// True while the media object is alive (it is released in ShutdownMedia).
bool
PeerConnectionImpl::HasMedia() const
{
  return mMedia;
}
+
+PeerConnectionWrapper::PeerConnectionWrapper(const std::string& handle)
+ : impl_(nullptr) {
+ if (PeerConnectionCtx::GetInstance()->mPeerConnections.find(handle) ==
+ PeerConnectionCtx::GetInstance()->mPeerConnections.end()) {
+ return;
+ }
+
+ PeerConnectionImpl *impl = PeerConnectionCtx::GetInstance()->mPeerConnections[handle];
+
+ if (!impl->media())
+ return;
+
+ impl_ = impl;
+}
+
// Returns this PC's globally-unique handle string.
const std::string&
PeerConnectionImpl::GetHandle()
{
  PC_AUTO_ENTER_API_CALL_NO_CHECK();
  return mHandle;
}
+
// Returns this PC's human-readable name (used in logs).
const std::string&
PeerConnectionImpl::GetName()
{
  PC_AUTO_ENTER_API_CALL_NO_CHECK();
  return mName;
}
+
// Converts an NrIceCtx connection state to the DOM-facing enum.
// The switch covers all enumerators; MOZ_CRASH on an out-of-range value.
static mozilla::dom::PCImplIceConnectionState
toDomIceConnectionState(NrIceCtx::ConnectionState state) {
  switch (state) {
    case NrIceCtx::ICE_CTX_INIT:
      return PCImplIceConnectionState::New;
    case NrIceCtx::ICE_CTX_CHECKING:
      return PCImplIceConnectionState::Checking;
    case NrIceCtx::ICE_CTX_CONNECTED:
      return PCImplIceConnectionState::Connected;
    case NrIceCtx::ICE_CTX_COMPLETED:
      return PCImplIceConnectionState::Completed;
    case NrIceCtx::ICE_CTX_FAILED:
      return PCImplIceConnectionState::Failed;
    case NrIceCtx::ICE_CTX_DISCONNECTED:
      return PCImplIceConnectionState::Disconnected;
    case NrIceCtx::ICE_CTX_CLOSED:
      return PCImplIceConnectionState::Closed;
  }
  MOZ_CRASH();
}
+
// Converts an NrIceCtx gathering state to the DOM-facing enum.
// The switch covers all enumerators; MOZ_CRASH on an out-of-range value.
static mozilla::dom::PCImplIceGatheringState
toDomIceGatheringState(NrIceCtx::GatheringState state) {
  switch (state) {
    case NrIceCtx::ICE_CTX_GATHER_INIT:
      return PCImplIceGatheringState::New;
    case NrIceCtx::ICE_CTX_GATHER_STARTED:
      return PCImplIceGatheringState::Gathering;
    case NrIceCtx::ICE_CTX_GATHER_COMPLETE:
      return PCImplIceGatheringState::Complete;
  }
  MOZ_CRASH();
}
+
// Called when the ICE stack produced a local candidate. Incorporates it into
// the local SDP (unless the m-section is bundled, in which case it is
// skipped) and forwards it to content as a trickle candidate.
void
PeerConnectionImpl::CandidateReady(const std::string& candidate,
                                   uint16_t level) {
  PC_AUTO_ENTER_API_CALL_VOID_RETURN(false);

  std::string mid;
  bool skipped = false;
  nsresult res = mJsepSession->AddLocalIceCandidate(candidate,
                                                    level,
                                                    &mid,
                                                    &skipped);

  if (NS_FAILED(res)) {
    std::string errorString = mJsepSession->GetLastError();

    CSFLogError(logTag, "Failed to incorporate local candidate into SDP:"
                        " res = %u, candidate = %s, level = %u, error = %s",
                static_cast<unsigned>(res),
                candidate.c_str(),
                static_cast<unsigned>(level),
                errorString.c_str());
    return;
  }

  if (skipped) {
    CSFLogDebug(logTag, "Skipped adding local candidate %s (level %u) to SDP, "
                        "this typically happens because the m-section is "
                        "bundled, which means it doesn't make sense for it to "
                        "have its own transport-related attributes.",
                candidate.c_str(),
                static_cast<unsigned>(level));
    return;
  }

  CSFLogDebug(logTag, "Passing local candidate to content: %s",
              candidate.c_str());
  SendLocalIceCandidateToContent(level, mid, candidate);

  UpdateSignalingState();
}
+
+static void
+SendLocalIceCandidateToContentImpl(nsWeakPtr weakPCObserver,
+ uint16_t level,
+ const std::string& mid,
+ const std::string& candidate) {
+ RefPtr<PeerConnectionObserver> pco = do_QueryObjectReferent(weakPCObserver);
+ if (!pco) {
+ return;
+ }
+
+ JSErrorResult rv;
+ pco->OnIceCandidate(level,
+ ObString(mid.c_str()),
+ ObString(candidate.c_str()),
+ rv);
+}
+
// Forwards a local trickle candidate to content asynchronously.
void
PeerConnectionImpl::SendLocalIceCandidateToContent(
    uint16_t level,
    const std::string& mid,
    const std::string& candidate) {
  // We dispatch this because OnSetLocalDescriptionSuccess does a setTimeout(0)
  // to unwind the stack, but the event handlers don't. We need to ensure that
  // the candidates do not skip ahead of the callback.
  NS_DispatchToMainThread(
      WrapRunnableNM(&SendLocalIceCandidateToContentImpl,
                     mPCObserver,
                     level,
                     mid,
                     candidate),
      NS_DISPATCH_NORMAL);
}
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+// Telemetry helpers: classify a DOM ICE connection state.
+
+// True once ICE has left the transient New/Checking phase, i.e. the attempt
+// has concluded one way or another (Connected/Completed/Failed/etc.).
+static bool isDone(PCImplIceConnectionState state) {
+ return state != PCImplIceConnectionState::Checking &&
+ state != PCImplIceConnectionState::New;
+}
+
+// True for the two success states.
+static bool isSucceeded(PCImplIceConnectionState state) {
+ return state == PCImplIceConnectionState::Connected ||
+ state == PCImplIceConnectionState::Completed;
+}
+
+// True only for outright failure.
+static bool isFailed(PCImplIceConnectionState state) {
+ return state == PCImplIceConnectionState::Failed;
+}
+#endif
+
+// Sigslot callback fired when the ICE context's connection state changes.
+// Translates the NrIceCtx state to the DOM iceConnectionState, records
+// success/failure telemetry, finalizes any pending ICE restart, stamps the
+// timecard, and dispatches OnStateChange to the observer.
+void PeerConnectionImpl::IceConnectionStateChange(
+ NrIceCtx* ctx,
+ NrIceCtx::ConnectionState state) {
+ PC_AUTO_ENTER_API_CALL_VOID_RETURN(false);
+
+ CSFLogDebug(logTag, "%s", __FUNCTION__);
+
+ auto domState = toDomIceConnectionState(state);
+ if (domState == mIceConnectionState) {
+ // no work to be done since the states are the same.
+ // this can happen during ICE rollback situations.
+ return;
+ }
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+ // Telemetry: record how long ICE took, and how many addIceCandidate errors
+ // occurred, once the attempt transitions from "in progress" to "done".
+ if (!isDone(mIceConnectionState) && isDone(domState)) {
+ // mIceStartTime can be null if going directly from New to Closed, in which
+ // case we don't count it as a success or a failure.
+ if (!mIceStartTime.IsNull()){
+ TimeDuration timeDelta = TimeStamp::Now() - mIceStartTime;
+ if (isSucceeded(domState)) {
+ Telemetry::Accumulate(Telemetry::WEBRTC_ICE_SUCCESS_TIME,
+ timeDelta.ToMilliseconds());
+ } else if (isFailed(domState)) {
+ Telemetry::Accumulate(Telemetry::WEBRTC_ICE_FAILURE_TIME,
+ timeDelta.ToMilliseconds());
+ }
+ }
+
+ if (isSucceeded(domState)) {
+ Telemetry::Accumulate(
+ Telemetry::WEBRTC_ICE_ADD_CANDIDATE_ERRORS_GIVEN_SUCCESS,
+ mAddCandidateErrorCount);
+ } else if (isFailed(domState)) {
+ Telemetry::Accumulate(
+ Telemetry::WEBRTC_ICE_ADD_CANDIDATE_ERRORS_GIVEN_FAILURE,
+ mAddCandidateErrorCount);
+ }
+ }
+#endif
+
+ mIceConnectionState = domState;
+
+ // Reaching a settled state (success or failure) concludes any in-flight
+ // ICE restart.
+ if (mIceConnectionState == PCImplIceConnectionState::Connected ||
+ mIceConnectionState == PCImplIceConnectionState::Completed ||
+ mIceConnectionState == PCImplIceConnectionState::Failed) {
+ if (mMedia->IsIceRestarting()) {
+ FinalizeIceRestart();
+ }
+ }
+
+ // Would be nice if we had a means of converting one of these dom enums
+ // to a string that wasn't almost as much text as this switch statement...
+ switch (mIceConnectionState) {
+ case PCImplIceConnectionState::New:
+ STAMP_TIMECARD(mTimeCard, "Ice state: new");
+ break;
+ case PCImplIceConnectionState::Checking:
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+ // For telemetry
+ mIceStartTime = TimeStamp::Now();
+#endif
+ STAMP_TIMECARD(mTimeCard, "Ice state: checking");
+ break;
+ case PCImplIceConnectionState::Connected:
+ STAMP_TIMECARD(mTimeCard, "Ice state: connected");
+ break;
+ case PCImplIceConnectionState::Completed:
+ STAMP_TIMECARD(mTimeCard, "Ice state: completed");
+ break;
+ case PCImplIceConnectionState::Failed:
+ STAMP_TIMECARD(mTimeCard, "Ice state: failed");
+ break;
+ case PCImplIceConnectionState::Disconnected:
+ STAMP_TIMECARD(mTimeCard, "Ice state: disconnected");
+ break;
+ case PCImplIceConnectionState::Closed:
+ STAMP_TIMECARD(mTimeCard, "Ice state: closed");
+ break;
+ default:
+ MOZ_ASSERT_UNREACHABLE("Unexpected mIceConnectionState!");
+ }
+
+ // Notify content (if the observer is still alive) on the PC's thread.
+ RefPtr<PeerConnectionObserver> pco = do_QueryObjectReferent(mPCObserver);
+ if (!pco) {
+ return;
+ }
+ WrappableJSErrorResult rv;
+ RUN_ON_THREAD(mThread,
+ WrapRunnable(pco,
+ &PeerConnectionObserver::OnStateChange,
+ PCObserverStateType::IceConnectionState,
+ rv, static_cast<JSCompartment*>(nullptr)),
+ NS_DISPATCH_NORMAL);
+}
+
+// Sigslot callback fired when ICE candidate gathering changes state.
+// Updates the DOM iceGatheringState, stamps the timecard, dispatches
+// OnStateChange to the observer, and — when gathering completes — emits an
+// empty candidate (level 0, empty mid/candidate) as the end-of-candidates
+// signal to content.
+void
+PeerConnectionImpl::IceGatheringStateChange(
+ NrIceCtx* ctx,
+ NrIceCtx::GatheringState state)
+{
+ PC_AUTO_ENTER_API_CALL_VOID_RETURN(false);
+
+ CSFLogDebug(logTag, "%s", __FUNCTION__);
+
+ mIceGatheringState = toDomIceGatheringState(state);
+
+ // Would be nice if we had a means of converting one of these dom enums
+ // to a string that wasn't almost as much text as this switch statement...
+ switch (mIceGatheringState) {
+ case PCImplIceGatheringState::New:
+ STAMP_TIMECARD(mTimeCard, "Ice gathering state: new");
+ break;
+ case PCImplIceGatheringState::Gathering:
+ STAMP_TIMECARD(mTimeCard, "Ice gathering state: gathering");
+ break;
+ case PCImplIceGatheringState::Complete:
+ STAMP_TIMECARD(mTimeCard, "Ice gathering state: complete");
+ break;
+ default:
+ MOZ_ASSERT_UNREACHABLE("Unexpected mIceGatheringState!");
+ }
+
+ RefPtr<PeerConnectionObserver> pco = do_QueryObjectReferent(mPCObserver);
+ if (!pco) {
+ return;
+ }
+ WrappableJSErrorResult rv;
+ mThread->Dispatch(WrapRunnable(pco,
+ &PeerConnectionObserver::OnStateChange,
+ PCObserverStateType::IceGatheringState,
+ rv, static_cast<JSCompartment*>(nullptr)),
+ NS_DISPATCH_NORMAL);
+
+ if (mIceGatheringState == PCImplIceGatheringState::Complete) {
+ // Empty candidate string == end-of-candidates marker for content.
+ SendLocalIceCandidateToContent(0, "", "");
+ }
+}
+
+// Forwards the default candidate addresses/ports (RTP and RTCP) for the
+// given m-line level to the JSEP session, which incorporates them into the
+// local SDP's c= / m= (and rtcp) lines.
+void
+PeerConnectionImpl::UpdateDefaultCandidate(const std::string& defaultAddr,
+ uint16_t defaultPort,
+ const std::string& defaultRtcpAddr,
+ uint16_t defaultRtcpPort,
+ uint16_t level) {
+ CSFLogDebug(logTag, "%s", __FUNCTION__);
+ mJsepSession->UpdateDefaultCandidate(defaultAddr,
+ defaultPort,
+ defaultRtcpAddr,
+ defaultRtcpPort,
+ level);
+}
+
+// Tells the JSEP session that gathering for |level| is finished, so it can
+// mark the corresponding m-section end-of-candidates.
+void
+PeerConnectionImpl::EndOfLocalCandidates(uint16_t level) {
+ CSFLogDebug(logTag, "%s", __FUNCTION__);
+ mJsepSession->EndOfLocalCandidates(level);
+}
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+// Main-thread half of the stats-gathering pipeline: validates state, fills
+// in everything that must be read on main (timestamp, SDP, pipeline refs,
+// the ice_ctx ref), leaving ExecuteStatsQuery_s to do the STS-thread work.
+//
+// aSelector: optional track filter; when null, all pipelines (and all ICE
+//            levels, including datachannel-only ones) are included.
+// query:     out-param; caller owns it and ships it to STS afterwards.
+// Returns NS_ERROR_UNEXPECTED if media/thread/ice_ctx are unavailable.
+nsresult
+PeerConnectionImpl::BuildStatsQuery_m(
+ mozilla::dom::MediaStreamTrack *aSelector,
+ RTCStatsQuery *query) {
+
+ if (!HasMedia()) {
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ if (!mThread) {
+ CSFLogError(logTag, "Could not build stats query, no MainThread");
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ nsresult rv = GetTimeSinceEpoch(&(query->now));
+ if (NS_FAILED(rv)) {
+ CSFLogError(logTag, "Could not build stats query, could not get timestamp");
+ return rv;
+ }
+
+ // Note: mMedia->ice_ctx() is deleted on STS thread; so make sure we grab and hold
+ // a ref instead of making multiple calls. NrIceCtx uses threadsafe refcounting.
+ // NOTE: Do this after all other failure tests, to ensure we don't
+ // accidentally release the Ctx on Mainthread.
+ query->iceCtx = mMedia->ice_ctx();
+ if (!query->iceCtx) {
+ CSFLogError(logTag, "Could not build stats query, no ice_ctx");
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ // We do not use the pcHandle here, since that's risky to expose to content.
+ query->report = new RTCStatsReportInternalConstruct(
+ NS_ConvertASCIItoUTF16(mName.c_str()),
+ query->now);
+
+ query->iceStartTime = mIceStartTime;
+ query->failed = isFailed(mIceConnectionState);
+
+ // Populate SDP on main
+ if (query->internalStats) {
+ if (mJsepSession) {
+ std::string localDescription = mJsepSession->GetLocalDescription();
+ std::string remoteDescription = mJsepSession->GetRemoteDescription();
+ query->report->mLocalSdp.Construct(
+ NS_ConvertASCIItoUTF16(localDescription.c_str()));
+ query->report->mRemoteSdp.Construct(
+ NS_ConvertASCIItoUTF16(remoteDescription.c_str()));
+ }
+ }
+
+ // Gather up pipelines from mMedia so they may be inspected on STS
+
+ std::string trackId;
+ if (aSelector) {
+ trackId = PeerConnectionImpl::GetTrackId(*aSelector);
+ }
+
+ for (int i = 0, len = mMedia->LocalStreamsLength(); i < len; i++) {
+ for (auto pipeline : mMedia->GetLocalStreamByIndex(i)->GetPipelines()) {
+ if (!aSelector || pipeline.second->trackid() == trackId) {
+ query->pipelines.AppendElement(pipeline.second);
+ }
+ }
+ }
+ for (int i = 0, len = mMedia->RemoteStreamsLength(); i < len; i++) {
+ for (auto pipeline : mMedia->GetRemoteStreamByIndex(i)->GetPipelines()) {
+ if (!aSelector || pipeline.second->trackid() == trackId) {
+ query->pipelines.AppendElement(pipeline.second);
+ }
+ }
+ }
+
+ if (!aSelector) {
+ // No track filter: ExecuteStatsQuery_s will iterate every ICE stream.
+ query->grabAllLevels = true;
+ }
+
+ return rv;
+}
+
+// Converts a vector of NrIceCandidate into RTCIceCandidateStats entries and
+// appends them (fallibly) to the report. |candidateType| is either
+// Localcandidate or Remotecandidate; the mozLocalTransport field is only
+// meaningful (and only filled in) for local candidates.
+static void ToRTCIceCandidateStats(
+ const std::vector<NrIceCandidate>& candidates,
+ RTCStatsType candidateType,
+ const nsString& componentId,
+ DOMHighResTimeStamp now,
+ RTCStatsReportInternal* report) {
+
+ MOZ_ASSERT(report);
+ for (auto c = candidates.begin(); c != candidates.end(); ++c) {
+ RTCIceCandidateStats cand;
+ cand.mType.Construct(candidateType);
+ // The candidate's codeword doubles as its stats id.
+ NS_ConvertASCIItoUTF16 codeword(c->codeword.c_str());
+ cand.mComponentId.Construct(componentId);
+ cand.mId.Construct(codeword);
+ cand.mTimestamp.Construct(now);
+ cand.mCandidateType.Construct(
+ RTCStatsIceCandidateType(c->type));
+ cand.mIpAddress.Construct(
+ NS_ConvertASCIItoUTF16(c->cand_addr.host.c_str()));
+ cand.mPortNumber.Construct(c->cand_addr.port);
+ cand.mTransport.Construct(
+ NS_ConvertASCIItoUTF16(c->cand_addr.transport.c_str()));
+ if (candidateType == RTCStatsType::Localcandidate) {
+ cand.mMozLocalTransport.Construct(
+ NS_ConvertASCIItoUTF16(c->local_addr.transport.c_str()));
+ }
+ report->mIceCandidateStats.Value().AppendElement(cand, fallible);
+ }
+}
+
+// STS-thread helper: records candidate-pair stats plus local and remote
+// candidate stats for one NrIceMediaStream into the report. Errors fetching
+// pairs are logged and abort the whole stream; candidate fetch failures are
+// silently skipped. NOTE(review): |internalStats| is accepted but not read
+// in this body — presumably reserved for chrome-only detail; confirm.
+static void RecordIceStats_s(
+ NrIceMediaStream& mediaStream,
+ bool internalStats,
+ DOMHighResTimeStamp now,
+ RTCStatsReportInternal* report) {
+
+ NS_ConvertASCIItoUTF16 componentId(mediaStream.name().c_str());
+
+ std::vector<NrIceCandidatePair> candPairs;
+ nsresult res = mediaStream.GetCandidatePairs(&candPairs);
+ if (NS_FAILED(res)) {
+ CSFLogError(logTag, "%s: Error getting candidate pairs", __FUNCTION__);
+ return;
+ }
+
+ for (auto p = candPairs.begin(); p != candPairs.end(); ++p) {
+ NS_ConvertASCIItoUTF16 codeword(p->codeword.c_str());
+ NS_ConvertASCIItoUTF16 localCodeword(p->local.codeword.c_str());
+ NS_ConvertASCIItoUTF16 remoteCodeword(p->remote.codeword.c_str());
+ // Only expose candidate-pair statistics to chrome, until we've thought
+ // through the implications of exposing it to content.
+
+ RTCIceCandidatePairStats s;
+ s.mId.Construct(codeword);
+ s.mComponentId.Construct(componentId);
+ s.mTimestamp.Construct(now);
+ s.mType.Construct(RTCStatsType::Candidatepair);
+ s.mLocalCandidateId.Construct(localCodeword);
+ s.mRemoteCandidateId.Construct(remoteCodeword);
+ s.mNominated.Construct(p->nominated);
+ s.mPriority.Construct(p->priority);
+ s.mSelected.Construct(p->selected);
+ s.mState.Construct(RTCStatsIceCandidatePairState(p->state));
+ report->mIceCandidatePairStats.Value().AppendElement(s, fallible);
+ }
+
+ std::vector<NrIceCandidate> candidates;
+ if (NS_SUCCEEDED(mediaStream.GetLocalCandidates(&candidates))) {
+ ToRTCIceCandidateStats(candidates,
+ RTCStatsType::Localcandidate,
+ componentId,
+ now,
+ report);
+ }
+ // Reuse the vector for the remote side.
+ candidates.clear();
+
+ if (NS_SUCCEEDED(mediaStream.GetRemoteCandidates(&candidates))) {
+ ToRTCIceCandidateStats(candidates,
+ RTCStatsType::Remotecandidate,
+ componentId,
+ now,
+ report);
+ }
+}
+
+// STS-thread half of stats gathering. For each pipeline captured by
+// BuildStatsQuery_m it emits inbound/outbound RTP stream stats (local side
+// plus an RTCP-derived "remote" mirror when reports are available), then
+// records ICE stats either per-pipeline level or — when grabAllLevels —
+// for every stream in the ICE context. Releases query->iceCtx before
+// returning because NrIceCtx must be destroyed on STS.
+nsresult
+PeerConnectionImpl::ExecuteStatsQuery_s(RTCStatsQuery *query) {
+
+ ASSERT_ON_THREAD(query->iceCtx->thread());
+
+ // Gather stats from pipelines provided (can't touch mMedia + stream on STS)
+
+ for (size_t p = 0; p < query->pipelines.Length(); ++p) {
+ const MediaPipeline& mp = *query->pipelines[p];
+ bool isAudio = (mp.Conduit()->type() == MediaSessionConduit::AUDIO);
+ nsString mediaType = isAudio ?
+ NS_LITERAL_STRING("audio") : NS_LITERAL_STRING("video");
+ // Stats ids are of the form "<audio|video>_<level>".
+ nsString idstr = mediaType;
+ idstr.AppendLiteral("_");
+ idstr.AppendInt(mp.level());
+
+ // Gather pipeline stats.
+ switch (mp.direction()) {
+ case MediaPipeline::TRANSMIT: {
+ nsString localId = NS_LITERAL_STRING("outbound_rtp_") + idstr;
+ nsString remoteId;
+ nsString ssrc;
+ unsigned int ssrcval;
+ if (mp.Conduit()->GetLocalSSRC(&ssrcval)) {
+ ssrc.AppendInt(ssrcval);
+ }
+ {
+ // First, fill in remote stat with rtcp receiver data, if present.
+ // ReceiverReports have less information than SenderReports,
+ // so fill in what we can.
+ DOMHighResTimeStamp timestamp;
+ uint32_t jitterMs;
+ uint32_t packetsReceived;
+ uint64_t bytesReceived;
+ uint32_t packetsLost;
+ int32_t rtt;
+ if (mp.Conduit()->GetRTCPReceiverReport(&timestamp, &jitterMs,
+ &packetsReceived,
+ &bytesReceived,
+ &packetsLost,
+ &rtt)) {
+ remoteId = NS_LITERAL_STRING("outbound_rtcp_") + idstr;
+ RTCInboundRTPStreamStats s;
+ s.mTimestamp.Construct(timestamp);
+ s.mId.Construct(remoteId);
+ s.mType.Construct(RTCStatsType::Inboundrtp);
+ if (ssrc.Length()) {
+ s.mSsrc.Construct(ssrc);
+ }
+ s.mMediaType.Construct(mediaType);
+ // RTCP jitter is reported in ms; the stat is in seconds.
+ s.mJitter.Construct(double(jitterMs)/1000);
+ s.mRemoteId.Construct(localId);
+ s.mIsRemote = true;
+ s.mPacketsReceived.Construct(packetsReceived);
+ s.mBytesReceived.Construct(bytesReceived);
+ s.mPacketsLost.Construct(packetsLost);
+ s.mMozRtt.Construct(rtt);
+ query->report->mInboundRTPStreamStats.Value().AppendElement(s,
+ fallible);
+ }
+ }
+ // Then, fill in local side (with cross-link to remote only if present)
+ {
+ RTCOutboundRTPStreamStats s;
+ s.mTimestamp.Construct(query->now);
+ s.mId.Construct(localId);
+ s.mType.Construct(RTCStatsType::Outboundrtp);
+ if (ssrc.Length()) {
+ s.mSsrc.Construct(ssrc);
+ }
+ s.mMediaType.Construct(mediaType);
+ s.mRemoteId.Construct(remoteId);
+ s.mIsRemote = false;
+ s.mPacketsSent.Construct(mp.rtp_packets_sent());
+ s.mBytesSent.Construct(mp.rtp_bytes_sent());
+
+ // Lastly, fill in video encoder stats if this is video
+ if (!isAudio) {
+ double framerateMean;
+ double framerateStdDev;
+ double bitrateMean;
+ double bitrateStdDev;
+ uint32_t droppedFrames;
+ if (mp.Conduit()->GetVideoEncoderStats(&framerateMean,
+ &framerateStdDev,
+ &bitrateMean,
+ &bitrateStdDev,
+ &droppedFrames)) {
+ s.mFramerateMean.Construct(framerateMean);
+ s.mFramerateStdDev.Construct(framerateStdDev);
+ s.mBitrateMean.Construct(bitrateMean);
+ s.mBitrateStdDev.Construct(bitrateStdDev);
+ s.mDroppedFrames.Construct(droppedFrames);
+ }
+ }
+ query->report->mOutboundRTPStreamStats.Value().AppendElement(s,
+ fallible);
+ }
+ break;
+ }
+ case MediaPipeline::RECEIVE: {
+ nsString localId = NS_LITERAL_STRING("inbound_rtp_") + idstr;
+ nsString remoteId;
+ nsString ssrc;
+ unsigned int ssrcval;
+ if (mp.Conduit()->GetRemoteSSRC(&ssrcval)) {
+ ssrc.AppendInt(ssrcval);
+ }
+ {
+ // First, fill in remote stat with rtcp sender data, if present.
+ DOMHighResTimeStamp timestamp;
+ uint32_t packetsSent;
+ uint64_t bytesSent;
+ if (mp.Conduit()->GetRTCPSenderReport(&timestamp,
+ &packetsSent, &bytesSent)) {
+ remoteId = NS_LITERAL_STRING("inbound_rtcp_") + idstr;
+ RTCOutboundRTPStreamStats s;
+ s.mTimestamp.Construct(timestamp);
+ s.mId.Construct(remoteId);
+ s.mType.Construct(RTCStatsType::Outboundrtp);
+ if (ssrc.Length()) {
+ s.mSsrc.Construct(ssrc);
+ }
+ s.mMediaType.Construct(mediaType);
+ s.mRemoteId.Construct(localId);
+ s.mIsRemote = true;
+ s.mPacketsSent.Construct(packetsSent);
+ s.mBytesSent.Construct(bytesSent);
+ query->report->mOutboundRTPStreamStats.Value().AppendElement(s,
+ fallible);
+ }
+ }
+ // Then, fill in local side (with cross-link to remote only if present)
+ RTCInboundRTPStreamStats s;
+ s.mTimestamp.Construct(query->now);
+ s.mId.Construct(localId);
+ s.mType.Construct(RTCStatsType::Inboundrtp);
+ if (ssrc.Length()) {
+ s.mSsrc.Construct(ssrc);
+ }
+ s.mMediaType.Construct(mediaType);
+ unsigned int jitterMs, packetsLost;
+ if (mp.Conduit()->GetRTPStats(&jitterMs, &packetsLost)) {
+ s.mJitter.Construct(double(jitterMs)/1000);
+ s.mPacketsLost.Construct(packetsLost);
+ }
+ if (remoteId.Length()) {
+ s.mRemoteId.Construct(remoteId);
+ }
+ s.mIsRemote = false;
+ s.mPacketsReceived.Construct(mp.rtp_packets_received());
+ s.mBytesReceived.Construct(mp.rtp_bytes_received());
+
+ // Chrome-only AV-sync detail for audio pipelines.
+ if (query->internalStats && isAudio) {
+ int32_t jitterBufferDelay;
+ int32_t playoutBufferDelay;
+ int32_t avSyncDelta;
+ if (mp.Conduit()->GetAVStats(&jitterBufferDelay,
+ &playoutBufferDelay,
+ &avSyncDelta)) {
+ // NOTE(review): playoutBufferDelay is fetched but not reported
+ // here — confirm whether that is intentional.
+ s.mMozJitterBufferDelay.Construct(jitterBufferDelay);
+ s.mMozAvSyncDelay.Construct(avSyncDelta);
+ }
+ }
+ // Lastly, fill in video decoder stats if this is video
+ if (!isAudio) {
+ double framerateMean;
+ double framerateStdDev;
+ double bitrateMean;
+ double bitrateStdDev;
+ uint32_t discardedPackets;
+ if (mp.Conduit()->GetVideoDecoderStats(&framerateMean,
+ &framerateStdDev,
+ &bitrateMean,
+ &bitrateStdDev,
+ &discardedPackets)) {
+ s.mFramerateMean.Construct(framerateMean);
+ s.mFramerateStdDev.Construct(framerateStdDev);
+ s.mBitrateMean.Construct(bitrateMean);
+ s.mBitrateStdDev.Construct(bitrateStdDev);
+ s.mDiscardedPackets.Construct(discardedPackets);
+ }
+ }
+ query->report->mInboundRTPStreamStats.Value().AppendElement(s,
+ fallible);
+ break;
+ }
+ }
+
+ if (!query->grabAllLevels) {
+ // If we're grabbing all levels, that means we want datachannels too,
+ // which don't have pipelines.
+ if (query->iceCtx->GetStream(p)) {
+ RecordIceStats_s(*query->iceCtx->GetStream(p),
+ query->internalStats,
+ query->now,
+ query->report);
+ }
+ }
+ }
+
+ if (query->grabAllLevels) {
+ for (size_t i = 0; i < query->iceCtx->GetStreamCount(); ++i) {
+ if (query->iceCtx->GetStream(i)) {
+ RecordIceStats_s(*query->iceCtx->GetStream(i),
+ query->internalStats,
+ query->now,
+ query->report);
+ }
+ }
+ }
+
+ // NrIceCtx must be destroyed on STS, so it is not safe
+ // to dispatch it back to main.
+ query->iceCtx = nullptr;
+ return NS_OK;
+}
+
+// STS-thread entry point for a stats request: runs the query, then bounces
+// the result (and the query object itself) back to main thread for delivery
+// to the observer. The pcHandle string (not a pointer) is carried so the
+// main-thread side can re-look-up the PC, which may have died meanwhile.
+void PeerConnectionImpl::GetStatsForPCObserver_s(
+ const std::string& pcHandle, // The Runnable holds the memory
+ nsAutoPtr<RTCStatsQuery> query) {
+
+ MOZ_ASSERT(query);
+ MOZ_ASSERT(query->iceCtx);
+ ASSERT_ON_THREAD(query->iceCtx->thread());
+
+ nsresult rv = PeerConnectionImpl::ExecuteStatsQuery_s(query.get());
+
+ NS_DispatchToMainThread(
+ WrapRunnableNM(
+ &PeerConnectionImpl::DeliverStatsReportToPCObserver_m,
+ pcHandle,
+ rv,
+ query),
+ NS_DISPATCH_NORMAL);
+}
+
+// Main-thread tail of a stats request: re-resolves the PC by handle (it may
+// have been closed while the query ran on STS) and fires either
+// OnGetStatsSuccess or OnGetStatsError on the observer, depending on
+// |result| from ExecuteStatsQuery_s.
+void PeerConnectionImpl::DeliverStatsReportToPCObserver_m(
+ const std::string& pcHandle,
+ nsresult result,
+ nsAutoPtr<RTCStatsQuery> query) {
+
+ // Is the PeerConnectionImpl still around?
+ PeerConnectionWrapper pcw(pcHandle);
+ if (pcw.impl()) {
+ RefPtr<PeerConnectionObserver> pco =
+ do_QueryObjectReferent(pcw.impl()->mPCObserver);
+ if (pco) {
+ JSErrorResult rv;
+ if (NS_SUCCEEDED(result)) {
+ pco->OnGetStatsSuccess(*query->report, rv);
+ } else {
+ pco->OnGetStatsError(kInternalError,
+ ObString("Failed to fetch statistics"),
+ rv);
+ }
+
+ if (rv.Failed()) {
+ CSFLogError(logTag, "Error firing stats observer callback");
+ }
+ }
+ }
+}
+
+#endif
+
+// Hands this PC to WebrtcGlobalInformation for long-term ICE statistics
+// collection; a no-op in external-linkage (standalone) builds.
+void
+PeerConnectionImpl::RecordLongtermICEStatistics() {
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+ WebrtcGlobalInformation::StoreLongTermICEStatistics(*this);
+#endif
+}
+
+// Marks the PC as needing renegotiation and schedules (at most once — the
+// mNegotiationNeeded flag debounces repeats) an async check that will fire
+// onnegotiationneeded at content. Suppressed while signaling is not stable.
+void
+PeerConnectionImpl::OnNegotiationNeeded()
+{
+ if (mSignalingState != PCImplSignalingState::SignalingStable) {
+ // We will check whether we need to renegotiate when we reach stable again
+ return;
+ }
+
+ if (mNegotiationNeeded) {
+ // Already queued; don't fire the event twice.
+ return;
+ }
+
+ mNegotiationNeeded = true;
+
+ RUN_ON_THREAD(mThread,
+ WrapRunnableNM(&MaybeFireNegotiationNeeded_static, mHandle),
+ NS_DISPATCH_NORMAL);
+}
+
+/* static */
+// Handle-based trampoline: re-resolves the PC (which may have been closed
+// since the runnable was queued) before firing negotiationneeded.
+void
+PeerConnectionImpl::MaybeFireNegotiationNeeded_static(
+ const std::string& pcHandle)
+{
+ PeerConnectionWrapper wrapper(pcHandle);
+ if (!wrapper.impl()) {
+ // PC is gone; drop the event.
+ return;
+ }
+
+ wrapper.impl()->MaybeFireNegotiationNeeded();
+}
+
+// Fires OnNegotiationNeeded at the observer if the need flag is still set.
+// NOTE(review): mNegotiationNeeded is not cleared here — presumably reset
+// elsewhere (e.g. when signaling returns to stable); confirm.
+void
+PeerConnectionImpl::MaybeFireNegotiationNeeded()
+{
+ if (!mNegotiationNeeded) {
+ return;
+ }
+
+ RefPtr<PeerConnectionObserver> pco = do_QueryObjectReferent(mPCObserver);
+ if (!pco) {
+ return;
+ }
+
+ JSErrorResult rv;
+ pco->OnNegotiationNeeded(rv);
+}
+
+// Sigslot callback for an ICE media stream becoming ready. Currently only
+// logs the stream name; no state is updated.
+void
+PeerConnectionImpl::IceStreamReady(NrIceMediaStream *aStream)
+{
+ PC_AUTO_ENTER_API_CALL_NO_CHECK();
+ MOZ_ASSERT(aStream);
+
+ CSFLogDebug(logTag, "%s: %s", __FUNCTION__, aStream->name().c_str());
+}
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+//Telemetry for when calls start
+// Records the call start time and bumps the per-session call counter.
+// Idempotent: a non-null mStartTime means telemetry was already recorded.
+void
+PeerConnectionImpl::startCallTelem() {
+ if (!mStartTime.IsNull()) {
+ return;
+ }
+
+ // Start time for calls
+ mStartTime = TimeStamp::Now();
+
+ // Increment session call counter
+ // If we want to track Loop calls independently here, we need two histograms.
+ Telemetry::Accumulate(Telemetry::WEBRTC_CALL_COUNT_2, 1);
+}
+#endif
+
+// Appends every local DOMMediaStream known to the media object to |result|.
+// Fails with NS_ERROR_UNEXPECTED on a null stream-info entry; not supported
+// (NS_ERROR_FAILURE) in external-linkage builds.
+NS_IMETHODIMP
+PeerConnectionImpl::GetLocalStreams(nsTArray<RefPtr<DOMMediaStream > >& result)
+{
+ PC_AUTO_ENTER_API_CALL_NO_CHECK();
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+ for(uint32_t i=0; i < media()->LocalStreamsLength(); i++) {
+ LocalSourceStreamInfo *info = media()->GetLocalStreamByIndex(i);
+ NS_ENSURE_TRUE(info, NS_ERROR_UNEXPECTED);
+ result.AppendElement(info->GetMediaStream());
+ }
+ return NS_OK;
+#else
+ return NS_ERROR_FAILURE;
+#endif
+}
+
+// Remote-side counterpart of GetLocalStreams: appends every remote
+// DOMMediaStream to |result|; same error behavior.
+NS_IMETHODIMP
+PeerConnectionImpl::GetRemoteStreams(nsTArray<RefPtr<DOMMediaStream > >& result)
+{
+ PC_AUTO_ENTER_API_CALL_NO_CHECK();
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+ for(uint32_t i=0; i < media()->RemoteStreamsLength(); i++) {
+ RemoteSourceStreamInfo *info = media()->GetRemoteStreamByIndex(i);
+ NS_ENSURE_TRUE(info, NS_ERROR_UNEXPECTED);
+ result.AppendElement(info->GetMediaStream());
+ }
+ return NS_OK;
+#else
+ return NS_ERROR_FAILURE;
+#endif
+}
+
+// nsITimerCallback: plays the next queued DTMF tone. Consumes one character
+// from mTones per tick; unrecognized characters (GetDTMFToneCode == -1,
+// e.g. ","-style pauses — TODO confirm) just re-arm the timer for 2s.
+// Valid tones are dispatched to the audio conduit on STS and the timer is
+// re-armed for duration + inter-tone gap. An empty queue cancels the timer.
+// Each tick also fires an RTCDTMFToneChange event at content (the empty
+// |eventTone| on the final tick signals "done").
+nsresult
+PeerConnectionImpl::DTMFState::Notify(nsITimer* timer)
+{
+ MOZ_ASSERT(NS_IsMainThread());
+
+ nsString eventTone;
+ if (!mTones.IsEmpty()) {
+ uint16_t toneChar = mTones.CharAt(0);
+ int tone = GetDTMFToneCode(toneChar);
+
+ eventTone.Assign(toneChar);
+
+ // Pop the tone we're about to play.
+ mTones.Cut(0, 1);
+
+ if (tone == -1) {
+ mSendTimer->InitWithCallback(this, 2000, nsITimer::TYPE_ONE_SHOT);
+ } else {
+ // Reset delay if necessary
+ mSendTimer->InitWithCallback(this,
+ mDuration + mInterToneGap,
+ nsITimer::TYPE_ONE_SHOT);
+
+ RefPtr<AudioSessionConduit> conduit =
+ mPeerConnectionImpl->mMedia->GetAudioConduit(mLevel);
+
+ if (conduit) {
+ // Copy mDuration into a local so the lambda captures a value, not
+ // |this|, before hopping to the STS thread.
+ uint32_t duration = mDuration;
+ mPeerConnectionImpl->mSTSThread->Dispatch(WrapRunnableNM([conduit, tone, duration] () {
+ //Note: We default to channel 0, not inband, and 6dB attenuation.
+ // here. We might want to revisit these choices in the future.
+ conduit->InsertDTMFTone(0, tone, true, duration, 6);
+ }), NS_DISPATCH_NORMAL);
+ }
+ }
+ } else {
+ mSendTimer->Cancel();
+ }
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+ RefPtr<PeerConnectionObserver> pco = do_QueryObjectReferent(mPeerConnectionImpl->mPCObserver);
+ if (!pco) {
+ NS_WARNING("Failed to dispatch the RTCDTMFToneChange event!");
+ return NS_OK; // Return is ignored anyhow
+ }
+
+ JSErrorResult jrv;
+ pco->OnDTMFToneChange(mTrackId, eventTone, jrv);
+
+ if (jrv.Failed()) {
+ NS_WARNING("Failed to dispatch the RTCDTMFToneChange event!");
+ }
+#endif
+
+ return NS_OK;
+}
+
+PeerConnectionImpl::DTMFState::DTMFState() = default;
+PeerConnectionImpl::DTMFState::~DTMFState() = default;
+
+NS_IMPL_ISUPPORTS(PeerConnectionImpl::DTMFState, nsITimerCallback)
+
+} // end mozilla namespace
diff --git a/media/webrtc/signaling/src/peerconnection/PeerConnectionImpl.h b/media/webrtc/signaling/src/peerconnection/PeerConnectionImpl.h
new file mode 100644
index 000000000..c29d08180
--- /dev/null
+++ b/media/webrtc/signaling/src/peerconnection/PeerConnectionImpl.h
@@ -0,0 +1,894 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _PEER_CONNECTION_IMPL_H_
+#define _PEER_CONNECTION_IMPL_H_
+
+#include <deque>
+#include <string>
+#include <vector>
+#include <map>
+#include <cmath>
+
+#include "prlock.h"
+#include "mozilla/RefPtr.h"
+#include "nsWeakPtr.h"
+#include "nsAutoPtr.h"
+#include "nsIWeakReferenceUtils.h" // for the definition of nsWeakPtr
+#include "IPeerConnection.h"
+#include "sigslot.h"
+#include "nricectx.h"
+#include "nricemediastream.h"
+#include "nsComponentManagerUtils.h"
+#include "nsPIDOMWindow.h"
+#include "nsIUUIDGenerator.h"
+#include "nsIThread.h"
+
+#include "signaling/src/jsep/JsepSession.h"
+#include "signaling/src/jsep/JsepSessionImpl.h"
+#include "signaling/src/sdp/SdpMediaSection.h"
+
+#include "mozilla/ErrorResult.h"
+#include "mozilla/dom/PeerConnectionImplEnumsBinding.h"
+#include "PrincipalChangeObserver.h"
+#include "StreamTracks.h"
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+#include "mozilla/TimeStamp.h"
+#include "mozilla/net/DataChannel.h"
+#include "VideoUtils.h"
+#include "VideoSegment.h"
+#include "mozilla/dom/RTCStatsReportBinding.h"
+#include "nsIPrincipal.h"
+#include "mozilla/PeerIdentity.h"
+#endif
+
+namespace test {
+#ifdef USE_FAKE_PCOBSERVER
+class AFakePCObserver;
+#endif
+}
+
+#ifdef USE_FAKE_MEDIA_STREAMS
+class Fake_DOMMediaStream;
+class Fake_MediaStreamTrack;
+#endif
+
+class nsGlobalWindow;
+class nsDOMDataChannel;
+
+namespace mozilla {
+class DataChannel;
+class DtlsIdentity;
+class NrIceCtx;
+class NrIceMediaStream;
+class NrIceStunServer;
+class NrIceTurnServer;
+class MediaPipeline;
+
+#ifdef USE_FAKE_MEDIA_STREAMS
+typedef Fake_DOMMediaStream DOMMediaStream;
+#else
+class DOMMediaStream;
+#endif
+
+namespace dom {
+class RTCCertificate;
+struct RTCConfiguration;
+class RTCDTMFSender;
+struct RTCIceServer;
+struct RTCOfferOptions;
+struct RTCRtpParameters;
+class RTCRtpSender;
+#ifdef USE_FAKE_MEDIA_STREAMS
+typedef Fake_MediaStreamTrack MediaStreamTrack;
+#else
+class MediaStreamTrack;
+#endif
+
+#ifdef USE_FAKE_PCOBSERVER
+typedef test::AFakePCObserver PeerConnectionObserver;
+typedef const char *PCObserverString;
+#else
+class PeerConnectionObserver;
+typedef NS_ConvertUTF8toUTF16 PCObserverString;
+#endif
+}
+}
+
+#if defined(__cplusplus) && __cplusplus >= 201103L
+typedef struct Timecard Timecard;
+#else
+#include "timecard.h"
+#endif
+
+// To preserve blame, convert nsresult to ErrorResult with wrappers. These macros
+// help declare wrappers w/function being wrapped when there are no differences.
+
+#define NS_IMETHODIMP_TO_ERRORRESULT(func, rv, ...) \
+NS_IMETHODIMP func(__VA_ARGS__); \
+void func (__VA_ARGS__, rv)
+
+#define NS_IMETHODIMP_TO_ERRORRESULT_RETREF(resulttype, func, rv, ...) \
+NS_IMETHODIMP func(__VA_ARGS__, resulttype **result); \
+already_AddRefed<resulttype> func (__VA_ARGS__, rv)
+
+struct MediaStreamTable;
+
+namespace mozilla {
+
+using mozilla::dom::PeerConnectionObserver;
+using mozilla::dom::RTCConfiguration;
+using mozilla::dom::RTCIceServer;
+using mozilla::dom::RTCOfferOptions;
+using mozilla::DOMMediaStream;
+using mozilla::NrIceCtx;
+using mozilla::NrIceMediaStream;
+using mozilla::DtlsIdentity;
+using mozilla::ErrorResult;
+using mozilla::NrIceStunServer;
+using mozilla::NrIceTurnServer;
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+using mozilla::PeerIdentity;
+#endif
+
+class PeerConnectionWrapper;
+class PeerConnectionMedia;
+class RemoteSourceStreamInfo;
+
+// Uuid Generator
+class PCUuidGenerator : public mozilla::JsepUuidGenerator {
+ public:
+ virtual bool Generate(std::string* idp) override;
+
+ private:
+ nsCOMPtr<nsIUUIDGenerator> mGenerator;
+};
+
+// Unit-test-friendly mirror of RTCConfiguration: holds ICE servers plus
+// bundle and ICE-transport policies without requiring the WebIDL types.
+// Defaults: balanced bundle policy, all ICE candidate types allowed.
+class PeerConnectionConfiguration
+{
+public:
+ PeerConnectionConfiguration()
+ : mBundlePolicy(kBundleBalanced),
+ mIceTransportPolicy(NrIceCtx::ICE_POLICY_ALL) {}
+
+ // Parses and appends a STUN server; returns false if the address/port/
+ // transport combination is rejected by NrIceStunServer::Create.
+ bool addStunServer(const std::string& addr, uint16_t port,
+ const char* transport)
+ {
+ UniquePtr<NrIceStunServer> server(NrIceStunServer::Create(addr, port, transport));
+ if (!server) {
+ return false;
+ }
+ addStunServer(*server);
+ return true;
+ }
+ // Parses and appends a TURN server with credentials; returns false on a
+ // rejected configuration.
+ bool addTurnServer(const std::string& addr, uint16_t port,
+ const std::string& username,
+ const std::string& pwd,
+ const char* transport)
+ {
+ // TODO(ekr@rtfm.com): Need support for SASLprep for
+ // username and password. Bug # ???
+ std::vector<unsigned char> password(pwd.begin(), pwd.end());
+
+ UniquePtr<NrIceTurnServer> server(NrIceTurnServer::Create(addr, port, username, password,
+ transport));
+ if (!server) {
+ return false;
+ }
+ addTurnServer(*server);
+ return true;
+ }
+ // Pre-parsed-server and simple accessor variants.
+ void addStunServer(const NrIceStunServer& server) { mStunServers.push_back (server); }
+ void addTurnServer(const NrIceTurnServer& server) { mTurnServers.push_back (server); }
+ const std::vector<NrIceStunServer>& getStunServers() const { return mStunServers; }
+ const std::vector<NrIceTurnServer>& getTurnServers() const { return mTurnServers; }
+ void setBundlePolicy(JsepBundlePolicy policy) { mBundlePolicy = policy;}
+ JsepBundlePolicy getBundlePolicy() const { return mBundlePolicy; }
+ void setIceTransportPolicy(NrIceCtx::Policy policy) { mIceTransportPolicy = policy;}
+ NrIceCtx::Policy getIceTransportPolicy() const { return mIceTransportPolicy; }
+
+#ifndef MOZILLA_EXTERNAL_LINKAGE
+ // Populate from the WebIDL RTCConfiguration (JS entry path only).
+ nsresult Init(const RTCConfiguration& aSrc);
+ nsresult AddIceServer(const RTCIceServer& aServer);
+#endif
+
+private:
+ std::vector<NrIceStunServer> mStunServers;
+ std::vector<NrIceTurnServer> mTurnServers;
+ JsepBundlePolicy mBundlePolicy;
+ NrIceCtx::Policy mIceTransportPolicy;
+};
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+// Not an inner class so we can forward declare.
+// Carrier object for one getStats request. Filled in on main thread by
+// BuildStatsQuery_m, completed on STS by ExecuteStatsQuery_s, then shipped
+// back to main for delivery. The private members are PeerConnectionImpl's
+// working state for that round trip.
+class RTCStatsQuery {
+ public:
+ explicit RTCStatsQuery(bool internalStats);
+ ~RTCStatsQuery();
+
+ // The assembled stats report (owned by the query).
+ nsAutoPtr<mozilla::dom::RTCStatsReportInternal> report;
+ std::string error;
+ // A timestamp to help with telemetry.
+ mozilla::TimeStamp iceStartTime;
+ // Just for convenience, maybe integrate into the report later
+ bool failed;
+
+ private:
+ friend class PeerConnectionImpl;
+ std::string pcName;
+ // Chrome-only request: include SDP and other internal detail.
+ bool internalStats;
+ // Pipelines snapshotted on main thread for inspection on STS.
+ nsTArray<RefPtr<mozilla::MediaPipeline>> pipelines;
+ // Held ref; must be released on STS (see ExecuteStatsQuery_s).
+ RefPtr<NrIceCtx> iceCtx;
+ // When true (no track selector), record ICE stats for every level.
+ bool grabAllLevels;
+ DOMHighResTimeStamp now;
+};
+#endif // !MOZILLA_EXTERNAL_LINKAGE
+
+// Enter an API call and check that the state is OK,
+// the PC isn't closed, etc.
+#define PC_AUTO_ENTER_API_CALL(assert_ice_ready) \
+ do { \
+ /* do/while prevents res from conflicting with locals */ \
+ nsresult res = CheckApiState(assert_ice_ready); \
+ if (NS_FAILED(res)) return res; \
+ } while(0)
+#define PC_AUTO_ENTER_API_CALL_VOID_RETURN(assert_ice_ready) \
+ do { \
+ /* do/while prevents res from conflicting with locals */ \
+ nsresult res = CheckApiState(assert_ice_ready); \
+ if (NS_FAILED(res)) return; \
+ } while(0)
+#define PC_AUTO_ENTER_API_CALL_NO_CHECK() CheckThread()
+
+class PeerConnectionImpl final : public nsISupports,
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+ public mozilla::DataChannelConnection::DataConnectionListener,
+ public dom::PrincipalChangeObserver<dom::MediaStreamTrack>,
+#endif
+ public sigslot::has_slots<>
+{
+ struct Internal; // Avoid exposing c includes to bindings
+
+public:
+ explicit PeerConnectionImpl(const mozilla::dom::GlobalObject* aGlobal = nullptr);
+
+ enum Error {
+ kNoError = 0,
+ kInvalidCandidate = 2,
+ kInvalidMediastreamTrack = 3,
+ kInvalidState = 4,
+ kInvalidSessionDescription = 5,
+ kIncompatibleSessionDescription = 6,
+ kIncompatibleMediaStreamTrack = 8,
+ kInternalError = 9
+ };
+
+ NS_DECL_THREADSAFE_ISUPPORTS
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+ bool WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto, JS::MutableHandle<JSObject*> aReflector);
+#endif
+
+ static already_AddRefed<PeerConnectionImpl>
+ Constructor(const mozilla::dom::GlobalObject& aGlobal, ErrorResult& rv);
+ static PeerConnectionImpl* CreatePeerConnection();
+ already_AddRefed<DOMMediaStream> MakeMediaStream();
+
+ nsresult CreateRemoteSourceStreamInfo(RefPtr<RemoteSourceStreamInfo>* aInfo,
+ const std::string& aId);
+
+ // DataConnection observers
+ void NotifyDataChannel(already_AddRefed<mozilla::DataChannel> aChannel)
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+ // PeerConnectionImpl only inherits from mozilla::DataChannelConnection
+ // inside libxul.
+ override
+#endif
+ ;
+
+ // Get the media object
+ const RefPtr<PeerConnectionMedia>& media() const {
+ PC_AUTO_ENTER_API_CALL_NO_CHECK();
+ return mMedia;
+ }
+
+ // Configure the ability to use localhost.
+ void SetAllowIceLoopback(bool val) { mAllowIceLoopback = val; }
+ bool GetAllowIceLoopback() const { return mAllowIceLoopback; }
+
+ // Configure the ability to use IPV6 link-local addresses.
+ void SetAllowIceLinkLocal(bool val) { mAllowIceLinkLocal = val; }
+ bool GetAllowIceLinkLocal() const { return mAllowIceLinkLocal; }
+
+ // Handle system to allow weak references to be passed through C code
+ virtual const std::string& GetHandle();
+
+ // Name suitable for exposing to content
+ virtual const std::string& GetName();
+
+ // ICE events
+ void IceConnectionStateChange(NrIceCtx* ctx,
+ NrIceCtx::ConnectionState state);
+ void IceGatheringStateChange(NrIceCtx* ctx,
+ NrIceCtx::GatheringState state);
+ void UpdateDefaultCandidate(const std::string& defaultAddr,
+ uint16_t defaultPort,
+ const std::string& defaultRtcpAddr,
+ uint16_t defaultRtcpPort,
+ uint16_t level);
+ void EndOfLocalCandidates(uint16_t level);
+ void IceStreamReady(NrIceMediaStream *aStream);
+
+ static void ListenThread(void *aData);
+ static void ConnectThread(void *aData);
+
+ // Get the main thread
+ nsCOMPtr<nsIThread> GetMainThread() {
+ PC_AUTO_ENTER_API_CALL_NO_CHECK();
+ return mThread;
+ }
+
+ // Get the STS thread
+ nsIEventTarget* GetSTSThread() {
+ PC_AUTO_ENTER_API_CALL_NO_CHECK();
+ return mSTSThread;
+ }
+
+ nsPIDOMWindowInner* GetWindow() const {
+ PC_AUTO_ENTER_API_CALL_NO_CHECK();
+ return mWindow;
+ }
+
+ // Initialize PeerConnection from a PeerConnectionConfiguration object
+ // (used directly by unit-tests, and indirectly by the JS entry point)
+ // This is necessary because RTCConfiguration can't be used by unit-tests
+ nsresult Initialize(PeerConnectionObserver& aObserver,
+ nsGlobalWindow* aWindow,
+ const PeerConnectionConfiguration& aConfiguration,
+ nsISupports* aThread);
+
+#ifndef MOZILLA_EXTERNAL_LINKAGE
+ // Initialize PeerConnection from an RTCConfiguration object (JS entrypoint)
+ void Initialize(PeerConnectionObserver& aObserver,
+ nsGlobalWindow& aWindow,
+ const RTCConfiguration& aConfiguration,
+ nsISupports* aThread,
+ ErrorResult &rv);
+#endif
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+ void SetCertificate(mozilla::dom::RTCCertificate& aCertificate);
+ const RefPtr<mozilla::dom::RTCCertificate>& Certificate() const;
+#endif
+ // This is a hack to support external linkage.
+ RefPtr<DtlsIdentity> Identity() const;
+
+ NS_IMETHODIMP_TO_ERRORRESULT(CreateOffer, ErrorResult &rv,
+ const RTCOfferOptions& aOptions)
+ {
+ rv = CreateOffer(aOptions);
+ }
+
+ NS_IMETHODIMP CreateAnswer();
+ void CreateAnswer(ErrorResult &rv)
+ {
+ rv = CreateAnswer();
+ }
+
+ NS_IMETHODIMP CreateOffer(
+ const mozilla::JsepOfferOptions& aConstraints);
+
+ NS_IMETHODIMP SetLocalDescription (int32_t aAction, const char* aSDP);
+
+ void SetLocalDescription (int32_t aAction, const nsAString& aSDP, ErrorResult &rv)
+ {
+ rv = SetLocalDescription(aAction, NS_ConvertUTF16toUTF8(aSDP).get());
+ }
+
+ nsresult CreateNewRemoteTracks(RefPtr<PeerConnectionObserver>& aPco);
+
+ void RemoveOldRemoteTracks(RefPtr<PeerConnectionObserver>& aPco);
+
+ NS_IMETHODIMP SetRemoteDescription (int32_t aAction, const char* aSDP);
+
+ void SetRemoteDescription (int32_t aAction, const nsAString& aSDP, ErrorResult &rv)
+ {
+ rv = SetRemoteDescription(aAction, NS_ConvertUTF16toUTF8(aSDP).get());
+ }
+
+ NS_IMETHODIMP_TO_ERRORRESULT(GetStats, ErrorResult &rv,
+ mozilla::dom::MediaStreamTrack *aSelector)
+ {
+ rv = GetStats(aSelector);
+ }
+
+ NS_IMETHODIMP AddIceCandidate(const char* aCandidate, const char* aMid,
+ unsigned short aLevel);
+
+ void AddIceCandidate(const nsAString& aCandidate, const nsAString& aMid,
+ unsigned short aLevel, ErrorResult &rv)
+ {
+ rv = AddIceCandidate(NS_ConvertUTF16toUTF8(aCandidate).get(),
+ NS_ConvertUTF16toUTF8(aMid).get(), aLevel);
+ }
+
+ NS_IMETHODIMP CloseStreams();
+
+ void CloseStreams(ErrorResult &rv)
+ {
+ rv = CloseStreams();
+ }
+
+ NS_IMETHODIMP_TO_ERRORRESULT(AddTrack, ErrorResult &rv,
+ mozilla::dom::MediaStreamTrack& aTrack,
+ const mozilla::dom::Sequence<mozilla::OwningNonNull<DOMMediaStream>>& aStreams)
+ {
+ rv = AddTrack(aTrack, aStreams);
+ }
+
+ NS_IMETHODIMP_TO_ERRORRESULT(RemoveTrack, ErrorResult &rv,
+ mozilla::dom::MediaStreamTrack& aTrack)
+ {
+ rv = RemoveTrack(aTrack);
+ }
+
+ nsresult
+ AddTrack(mozilla::dom::MediaStreamTrack& aTrack, DOMMediaStream& aStream);
+
+ NS_IMETHODIMP_TO_ERRORRESULT(InsertDTMF, ErrorResult &rv,
+ dom::RTCRtpSender& sender,
+ const nsAString& tones,
+ uint32_t duration, uint32_t interToneGap) {
+ rv = InsertDTMF(sender, tones, duration, interToneGap);
+ }
+
+ NS_IMETHODIMP_TO_ERRORRESULT(GetDTMFToneBuffer, ErrorResult &rv,
+ dom::RTCRtpSender& sender,
+ nsAString& outToneBuffer) {
+ rv = GetDTMFToneBuffer(sender, outToneBuffer);
+ }
+
+ NS_IMETHODIMP_TO_ERRORRESULT(ReplaceTrack, ErrorResult &rv,
+ mozilla::dom::MediaStreamTrack& aThisTrack,
+ mozilla::dom::MediaStreamTrack& aWithTrack)
+ {
+ rv = ReplaceTrack(aThisTrack, aWithTrack);
+ }
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+ NS_IMETHODIMP_TO_ERRORRESULT(SetParameters, ErrorResult &rv,
+ dom::MediaStreamTrack& aTrack,
+ const dom::RTCRtpParameters& aParameters)
+ {
+ rv = SetParameters(aTrack, aParameters);
+ }
+
+ NS_IMETHODIMP_TO_ERRORRESULT(GetParameters, ErrorResult &rv,
+ dom::MediaStreamTrack& aTrack,
+ dom::RTCRtpParameters& aOutParameters)
+ {
+ rv = GetParameters(aTrack, aOutParameters);
+ }
+#endif
+
+ nsresult
+ SetParameters(dom::MediaStreamTrack& aTrack,
+ const std::vector<JsepTrack::JsConstraints>& aConstraints);
+
+ nsresult
+ GetParameters(dom::MediaStreamTrack& aTrack,
+ std::vector<JsepTrack::JsConstraints>* aOutConstraints);
+
+ NS_IMETHODIMP_TO_ERRORRESULT(SelectSsrc, ErrorResult &rv,
+ dom::MediaStreamTrack& aRecvTrack,
+ unsigned short aSsrcIndex)
+ {
+ rv = SelectSsrc(aRecvTrack, aSsrcIndex);
+ }
+
+ nsresult GetPeerIdentity(nsAString& peerIdentity)
+ {
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+ if (mPeerIdentity) {
+ peerIdentity = mPeerIdentity->ToString();
+ return NS_OK;
+ }
+#endif
+
+ peerIdentity.SetIsVoid(true);
+ return NS_OK;
+ }
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+ const PeerIdentity* GetPeerIdentity() const { return mPeerIdentity; }
+ nsresult SetPeerIdentity(const nsAString& peerIdentity);
+
+ const std::string& GetIdAsAscii() const
+ {
+ return mName;
+ }
+
+ nsresult GetId(nsAString& id)
+ {
+ id = NS_ConvertASCIItoUTF16(mName.c_str());
+ return NS_OK;
+ }
+
+ nsresult SetId(const nsAString& id)
+ {
+ mName = NS_ConvertUTF16toUTF8(id).get();
+ return NS_OK;
+ }
+#endif
+
+ // this method checks to see if we've made a promise to protect media.
+ bool PrivacyRequested() const { return mPrivacyRequested; }
+
+ NS_IMETHODIMP GetFingerprint(char** fingerprint);
+  // UTF-16 convenience wrapper over the char** fingerprint getter above.
+  // NOTE(review): `tmp` is uninitialized and never null-checked; if
+  // GetFingerprint(char**) fails without writing the out-param, AssignASCII
+  // and delete[] operate on garbage — confirm the XPCOM method always sets it.
+  void GetFingerprint(nsAString& fingerprint)
+  {
+    char *tmp;
+    GetFingerprint(&tmp);
+    fingerprint.AssignASCII(tmp);
+    delete[] tmp;
+  }
+
+ NS_IMETHODIMP GetLocalDescription(char** aSDP);
+
+ void GetLocalDescription(nsAString& aSDP)
+ {
+ char *tmp;
+ GetLocalDescription(&tmp);
+ aSDP.AssignASCII(tmp);
+ delete[] tmp;
+ }
+
+ NS_IMETHODIMP GetRemoteDescription(char** aSDP);
+
+ void GetRemoteDescription(nsAString& aSDP)
+ {
+ char *tmp;
+ GetRemoteDescription(&tmp);
+ aSDP.AssignASCII(tmp);
+ delete[] tmp;
+ }
+
+ NS_IMETHODIMP SignalingState(mozilla::dom::PCImplSignalingState* aState);
+
+ mozilla::dom::PCImplSignalingState SignalingState()
+ {
+ mozilla::dom::PCImplSignalingState state;
+ SignalingState(&state);
+ return state;
+ }
+
+ NS_IMETHODIMP IceConnectionState(
+ mozilla::dom::PCImplIceConnectionState* aState);
+
+ mozilla::dom::PCImplIceConnectionState IceConnectionState()
+ {
+ mozilla::dom::PCImplIceConnectionState state;
+ IceConnectionState(&state);
+ return state;
+ }
+
+ NS_IMETHODIMP IceGatheringState(
+ mozilla::dom::PCImplIceGatheringState* aState);
+
+ mozilla::dom::PCImplIceGatheringState IceGatheringState()
+ {
+ mozilla::dom::PCImplIceGatheringState state;
+ IceGatheringState(&state);
+ return state;
+ }
+
+ NS_IMETHODIMP Close();
+
+ void Close(ErrorResult &rv)
+ {
+ rv = Close();
+ }
+
+ bool PluginCrash(uint32_t aPluginID,
+ const nsAString& aPluginName);
+
+ void RecordEndOfCallTelemetry() const;
+
+ nsresult InitializeDataChannel();
+
+ NS_IMETHODIMP_TO_ERRORRESULT_RETREF(nsDOMDataChannel,
+ CreateDataChannel, ErrorResult &rv,
+ const nsAString& aLabel,
+ const nsAString& aProtocol,
+ uint16_t aType,
+ bool outOfOrderAllowed,
+ uint16_t aMaxTime,
+ uint16_t aMaxNum,
+ bool aExternalNegotiated,
+ uint16_t aStream);
+
+ NS_IMETHODIMP_TO_ERRORRESULT(GetLocalStreams, ErrorResult &rv,
+ nsTArray<RefPtr<DOMMediaStream > >& result)
+ {
+ rv = GetLocalStreams(result);
+ }
+
+ NS_IMETHODIMP_TO_ERRORRESULT(GetRemoteStreams, ErrorResult &rv,
+ nsTArray<RefPtr<DOMMediaStream > >& result)
+ {
+ rv = GetRemoteStreams(result);
+ }
+
+ // Called whenever something is unrecognized by the parser
+ // May be called more than once and does not necessarily mean
+ // that parsing was stopped, only that something was unrecognized.
+ void OnSdpParseError(const char* errorMessage);
+
+ // Called when OnLocal/RemoteDescriptionSuccess/Error
+ // is called to start the list over.
+ void ClearSdpParseErrorMessages();
+
+ // Called to retreive the list of parsing errors.
+ const std::vector<std::string> &GetSdpParseErrors();
+
+ // Sets the RTC Signaling State
+ void SetSignalingState_m(mozilla::dom::PCImplSignalingState aSignalingState,
+ bool rollback = false);
+
+ // Updates the RTC signaling state based on the JsepSession state
+ void UpdateSignalingState(bool rollback = false);
+
+ bool IsClosed() const;
+ // called when DTLS connects; we only need this once
+ nsresult SetDtlsConnected(bool aPrivacyRequested);
+
+ bool HasMedia() const;
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+ // initialize telemetry for when calls start
+ void startCallTelem();
+
+ nsresult BuildStatsQuery_m(
+ mozilla::dom::MediaStreamTrack *aSelector,
+ RTCStatsQuery *query);
+
+ static nsresult ExecuteStatsQuery_s(RTCStatsQuery *query);
+
+ // for monitoring changes in track ownership
+ // PeerConnectionMedia can't do it because it doesn't know about principals
+ virtual void PrincipalChanged(dom::MediaStreamTrack* aTrack) override;
+
+#endif
+
+ static std::string GetStreamId(const DOMMediaStream& aStream);
+ static std::string GetTrackId(const dom::MediaStreamTrack& track);
+
+ void OnMediaError(const std::string& aError);
+
+private:
+ virtual ~PeerConnectionImpl();
+ PeerConnectionImpl(const PeerConnectionImpl&rhs);
+ PeerConnectionImpl& operator=(PeerConnectionImpl);
+ nsresult CalculateFingerprint(const std::string& algorithm,
+ std::vector<uint8_t>* fingerprint) const;
+ nsresult ConfigureJsepSessionCodecs();
+
+ NS_IMETHODIMP EnsureDataConnection(uint16_t aNumstreams);
+
+ nsresult CloseInt();
+ nsresult CheckApiState(bool assert_ice_ready) const;
+ void CheckThread() const {
+ MOZ_ASSERT(CheckThreadInt(), "Wrong thread");
+ }
+ bool CheckThreadInt() const {
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+ // Thread assertions are disabled in the C++ unit tests because those
+ // make API calls off the main thread.
+ // This affects the standalone version of WebRTC since it is also used
+ // for an alternate build of the unit tests.
+ // TODO(ekr@rtfm.com): Fix the unit tests so they don't do that.
+ bool on;
+ NS_ENSURE_SUCCESS(mThread->IsOnCurrentThread(&on), false);
+ NS_ENSURE_TRUE(on, false);
+#endif
+ return true;
+ }
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+ nsresult GetTimeSinceEpoch(DOMHighResTimeStamp *result);
+#endif
+
+ // Shut down media - called on main thread only
+ void ShutdownMedia();
+
+ void CandidateReady(const std::string& candidate, uint16_t level);
+ void SendLocalIceCandidateToContent(uint16_t level,
+ const std::string& mid,
+ const std::string& candidate);
+
+ nsresult GetDatachannelParameters(
+ const mozilla::JsepApplicationCodecDescription** codec,
+ uint16_t* level) const;
+
+ static void DeferredAddTrackToJsepSession(const std::string& pcHandle,
+ SdpMediaSection::MediaType type,
+ const std::string& streamId,
+ const std::string& trackId);
+
+ nsresult AddTrackToJsepSession(SdpMediaSection::MediaType type,
+ const std::string& streamId,
+ const std::string& trackId);
+
+ nsresult SetupIceRestart();
+ nsresult RollbackIceRestart();
+ void FinalizeIceRestart();
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+ static void GetStatsForPCObserver_s(
+ const std::string& pcHandle,
+ nsAutoPtr<RTCStatsQuery> query);
+
+ // Sends an RTCStatsReport to JS. Must run on main thread.
+ static void DeliverStatsReportToPCObserver_m(
+ const std::string& pcHandle,
+ nsresult result,
+ nsAutoPtr<RTCStatsQuery> query);
+#endif
+
+ // When ICE completes, we record a bunch of statistics that outlive the
+ // PeerConnection. This is just telemetry right now, but this can also
+ // include things like dumping the RLogConnector somewhere, saving away
+ // an RTCStatsReport somewhere so it can be inspected after the call is over,
+ // or other things.
+ void RecordLongtermICEStatistics();
+
+ void OnNegotiationNeeded();
+ static void MaybeFireNegotiationNeeded_static(const std::string& pcHandle);
+ void MaybeFireNegotiationNeeded();
+
+ // Timecard used to measure processing time. This should be the first class
+ // attribute so that we accurately measure the time required to instantiate
+ // any other attributes of this class.
+ Timecard *mTimeCard;
+
+ mozilla::dom::PCImplSignalingState mSignalingState;
+
+ // ICE State
+ mozilla::dom::PCImplIceConnectionState mIceConnectionState;
+ mozilla::dom::PCImplIceGatheringState mIceGatheringState;
+
+ // DTLS
+ // this is true if we have been connected ever, see SetDtlsConnected
+ bool mDtlsConnected;
+
+ nsCOMPtr<nsIThread> mThread;
+ // TODO: Remove if we ever properly wire PeerConnection for cycle-collection.
+ nsWeakPtr mPCObserver;
+
+ nsCOMPtr<nsPIDOMWindowInner> mWindow;
+
+ // The SDP sent in from JS - here for debugging.
+ std::string mLocalRequestedSDP;
+ std::string mRemoteRequestedSDP;
+
+ // DTLS fingerprint
+ std::string mFingerprint;
+ std::string mRemoteFingerprint;
+
+ // identity-related fields
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+ // The entity on the other end of the peer-to-peer connection;
+ // void if they are not yet identified, and no identity setting has been set
+ RefPtr<PeerIdentity> mPeerIdentity;
+ // The certificate we are using.
+ RefPtr<mozilla::dom::RTCCertificate> mCertificate;
+#else
+ RefPtr<DtlsIdentity> mIdentity;
+#endif
+ // Whether an app should be prevented from accessing media produced by the PC
+ // If this is true, then media will not be sent until mPeerIdentity matches
+ // local streams PeerIdentity; and remote streams are protected from content
+ //
+ // This can be false if mPeerIdentity is set, in the case where identity is
+ // provided, but the media is not protected from the app on either side
+ bool mPrivacyRequested;
+
+ // A handle to refer to this PC with
+ std::string mHandle;
+
+ // A name for this PC that we are willing to expose to content.
+ std::string mName;
+
+ // The target to run stuff on
+ nsCOMPtr<nsIEventTarget> mSTSThread;
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+ // DataConnection that's used to get all the DataChannels
+ RefPtr<mozilla::DataChannelConnection> mDataConnection;
+#endif
+
+ bool mAllowIceLoopback;
+ bool mAllowIceLinkLocal;
+ RefPtr<PeerConnectionMedia> mMedia;
+
+ // The JSEP negotiation session.
+ mozilla::UniquePtr<PCUuidGenerator> mUuidGen;
+ mozilla::UniquePtr<mozilla::JsepSession> mJsepSession;
+ std::string mPreviousIceUfrag; // used during rollback of ice restart
+ std::string mPreviousIcePwd; // used during rollback of ice restart
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+ // Start time of ICE, used for telemetry
+ mozilla::TimeStamp mIceStartTime;
+ // Start time of call used for Telemetry
+ mozilla::TimeStamp mStartTime;
+#endif
+
+ // Temporary: used to prevent multiple audio streams or multiple video streams
+ // in a single PC. This is tied up in the IETF discussion around proper
+ // representation of multiple streams in SDP, and strongly related to
+ // Bug 840728.
+ int mNumAudioStreams;
+ int mNumVideoStreams;
+ bool mHaveConfiguredCodecs;
+
+ bool mHaveDataStream;
+
+ unsigned int mAddCandidateErrorCount;
+
+ bool mTrickle;
+
+ bool mNegotiationNeeded;
+
+ bool mPrivateWindow;
+
+ // storage for Telemetry data
+ uint16_t mMaxReceiving[SdpMediaSection::kMediaTypes];
+ uint16_t mMaxSending[SdpMediaSection::kMediaTypes];
+
+ // DTMF
+  // Per-sender DTMF playout state. The timer callback drives tone playout;
+  // refcounted because nsITimer holds the callback alive across dispatches.
+  class DTMFState : public nsITimerCallback {
+      virtual ~DTMFState();
+    public:
+    DTMFState();
+
+    NS_DECL_NSITIMERCALLBACK
+    NS_DECL_THREADSAFE_ISUPPORTS
+
+    // NOTE(review): raw back-pointer to the owning PC — presumably cleared
+    // before the PC dies; verify teardown order to rule out a dangling use.
+    PeerConnectionImpl* mPeerConnectionImpl;
+    nsCOMPtr<nsITimer> mSendTimer;  // fires per tone / inter-tone gap
+    nsString mTrackId;              // audio track the tones are inserted on
+    nsString mTones;                // remaining tone buffer (consumed as played)
+    size_t mLevel;                  // m-line level carrying the DTMF
+    uint32_t mDuration;             // per-tone duration, ms
+    uint32_t mInterToneGap;         // silence between tones, ms
+  };
+
+ nsTArray<RefPtr<DTMFState>> mDTMFStates;
+
+public:
+ //these are temporary until the DataChannel Listen/Connect API is removed
+ unsigned short listenPort;
+ unsigned short connectPort;
+ char *connectStr; // XXX ownership/free
+};
+
+// This is what is returned when you acquire on a handle
+// RAII-style lookup: resolves a PC handle string to a strong reference for
+// the duration of the wrapper's scope. impl() is null if the handle did not
+// resolve, so callers must check it before use.
+class PeerConnectionWrapper
+{
+ public:
+  explicit PeerConnectionWrapper(const std::string& handle);
+
+  // Non-owning accessor; may return nullptr when the handle was stale.
+  PeerConnectionImpl *impl() { return impl_; }
+
+ private:
+  RefPtr<PeerConnectionImpl> impl_;
+};
+
+} // end mozilla namespace
+
+#undef NS_IMETHODIMP_TO_ERRORRESULT
+#undef NS_IMETHODIMP_TO_ERRORRESULT_RETREF
+#endif // _PEER_CONNECTION_IMPL_H_
diff --git a/media/webrtc/signaling/src/peerconnection/PeerConnectionMedia.cpp b/media/webrtc/signaling/src/peerconnection/PeerConnectionMedia.cpp
new file mode 100644
index 000000000..4f42b0bb7
--- /dev/null
+++ b/media/webrtc/signaling/src/peerconnection/PeerConnectionMedia.cpp
@@ -0,0 +1,1672 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <ostream>
+#include <string>
+#include <vector>
+
+#include "CSFLog.h"
+
+#include "nspr.h"
+
+#include "nricectx.h"
+#include "nricemediastream.h"
+#include "MediaPipelineFactory.h"
+#include "PeerConnectionImpl.h"
+#include "PeerConnectionMedia.h"
+#include "AudioConduit.h"
+#include "VideoConduit.h"
+#include "runnable_utils.h"
+#include "transportlayerice.h"
+#include "transportlayerdtls.h"
+#include "signaling/src/jsep/JsepSession.h"
+#include "signaling/src/jsep/JsepTransport.h"
+
+#ifdef USE_FAKE_STREAMS
+#include "DOMMediaStream.h"
+#include "FakeMediaStreams.h"
+#else
+#include "MediaSegment.h"
+#ifdef MOZILLA_INTERNAL_API
+#include "MediaStreamGraph.h"
+#endif
+#endif
+
+#ifndef USE_FAKE_MEDIA_STREAMS
+#include "MediaStreamGraphImpl.h"
+#endif
+
+#include "nsNetCID.h"
+#include "nsNetUtil.h"
+#include "nsIURI.h"
+#include "nsIScriptSecurityManager.h"
+#include "nsICancelable.h"
+#include "nsILoadInfo.h"
+#include "nsIContentPolicy.h"
+#include "nsIProxyInfo.h"
+#include "nsIProtocolProxyService.h"
+
+#include "nsProxyRelease.h"
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+#include "MediaStreamList.h"
+#include "nsIScriptGlobalObject.h"
+#include "mozilla/Preferences.h"
+#include "mozilla/dom/RTCStatsReportBinding.h"
+#include "MediaStreamTrack.h"
+#include "VideoStreamTrack.h"
+#include "MediaStreamError.h"
+#include "MediaManager.h"
+#endif
+
+
+
+namespace mozilla {
+using namespace dom;
+
+static const char* logTag = "PeerConnectionMedia";
+
+// Swaps a local sending track for a new one: adds the new track, moves the
+// existing media pipeline from the old track onto it, then removes the old
+// track. Returns an error (without rolling back the AddTrack) if any step
+// fails.
+nsresult
+PeerConnectionMedia::ReplaceTrack(const std::string& aOldStreamId,
+                                  const std::string& aOldTrackId,
+                                  MediaStreamTrack& aNewTrack,
+                                  const std::string& aNewStreamId,
+                                  const std::string& aNewTrackId)
+{
+  RefPtr<LocalSourceStreamInfo> oldInfo(GetLocalStreamById(aOldStreamId));
+
+  if (!oldInfo) {
+    CSFLogError(logTag, "Failed to find stream id %s", aOldStreamId.c_str());
+    return NS_ERROR_NOT_AVAILABLE;
+  }
+
+  nsresult rv = AddTrack(*aNewTrack.mOwningStream, aNewStreamId,
+                         aNewTrack, aNewTrackId);
+  NS_ENSURE_SUCCESS(rv, rv);
+
+  // AddTrack should have created (or found) the stream info for the new id.
+  RefPtr<LocalSourceStreamInfo> newInfo(GetLocalStreamById(aNewStreamId));
+
+  if (!newInfo) {
+    CSFLogError(logTag, "Failed to add track id %s", aNewTrackId.c_str());
+    MOZ_ASSERT(false);
+    return NS_ERROR_FAILURE;
+  }
+
+  // Re-home the old track's pipeline so media keeps flowing on the new track.
+  rv = newInfo->TakePipelineFrom(oldInfo, aOldTrackId, aNewTrack, aNewTrackId);
+  NS_ENSURE_SUCCESS(rv, rv);
+
+  return RemoveLocalTrack(aOldStreamId, aOldTrackId);
+}
+
+// Intentionally empty: exists only so the RefPtr parameter's final release
+// happens on the main thread (the runnable is dispatched there holding the
+// last reference).
+static void
+PipelineReleaseRef_m(RefPtr<MediaPipeline> pipeline)
+{}
+
+// Runs on the STS thread: detaches the pipeline's transport there, then
+// bounces the final pipeline reference back to the main thread for release
+// (via the no-op PipelineReleaseRef_m above).
+static void
+PipelineDetachTransport_s(RefPtr<MediaPipeline> pipeline,
+                          nsCOMPtr<nsIThread> mainThread)
+{
+  pipeline->DetachTransport_s();
+  mainThread->Dispatch(
+    // Make sure we let go of our reference before dispatching
+    // If the dispatch fails, well, we're hosed anyway.
+    WrapRunnableNM(PipelineReleaseRef_m, pipeline.forget()),
+    NS_DISPATCH_NORMAL);
+}
+
+// Ends a track on a source MediaStream by posting a control message to the
+// MediaStreamGraph (track state changes must happen on the graph thread).
+// No-op for null or non-source streams, and compiled out entirely for
+// external (non-libxul) linkage.
+void
+SourceStreamInfo::EndTrack(MediaStream* stream, dom::MediaStreamTrack* track)
+{
+  if (!stream || !stream->AsSourceStream()) {
+    return;
+  }
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+  // ControlMessage::Run executes on the graph thread.
+  class Message : public ControlMessage {
+   public:
+    Message(MediaStream* stream, TrackID track)
+      : ControlMessage(stream),
+        track_id_(track) {}
+
+    virtual void Run() override {
+      mStream->AsSourceStream()->EndTrack(track_id_);
+    }
+   private:
+    TrackID track_id_;
+  };
+
+  stream->GraphImpl()->AppendMessage(
+    MakeUnique<Message>(stream, track->mTrackID));
+#endif
+
+}
+
+// Removes a track and, if it had a pipeline, shuts the pipeline's media down
+// on this (main) thread and dispatches transport detach to the STS thread.
+void
+SourceStreamInfo::RemoveTrack(const std::string& trackId)
+{
+  mTracks.erase(trackId);
+
+  RefPtr<MediaPipeline> pipeline = GetPipelineByTrackId_m(trackId);
+  if (pipeline) {
+    mPipelines.erase(trackId);
+    pipeline->ShutdownMedia_m();
+    // Transport teardown must happen on STS; the pipeline ref travels with
+    // the runnable and is released back on main (see PipelineDetachTransport_s).
+    mParent->GetSTSThread()->Dispatch(
+        WrapRunnableNM(PipelineDetachTransport_s,
+                       pipeline.forget(),
+                       mParent->GetMainThread()),
+        NS_DISPATCH_NORMAL);
+  }
+}
+
+// STS-thread half of teardown: detach transport on every pipeline.
+void SourceStreamInfo::DetachTransport_s()
+{
+  ASSERT_ON_THREAD(mParent->GetSTSThread());
+  // walk through all the MediaPipelines and call the shutdown
+  // transport functions. Must be on the STS thread.
+  for (auto it = mPipelines.begin(); it != mPipelines.end(); ++it) {
+    it->second->DetachTransport_s();
+  }
+}
+
+// Main-thread half of teardown: shut down media on every pipeline and drop
+// the stream reference.
+void SourceStreamInfo::DetachMedia_m()
+{
+  ASSERT_ON_THREAD(mParent->GetMainThread());
+
+  // walk through all the MediaPipelines and call the shutdown
+  // media functions. Must be on the main thread.
+  for (auto it = mPipelines.begin(); it != mPipelines.end(); ++it) {
+    it->second->ShutdownMedia_m();
+  }
+  mMediaStream = nullptr;
+}
+
+// WebIDL constructor entry point (JS path). Note: `rv` is accepted but never
+// set here; construction itself cannot fail.
+already_AddRefed<PeerConnectionImpl>
+PeerConnectionImpl::Constructor(const dom::GlobalObject& aGlobal, ErrorResult& rv)
+{
+  RefPtr<PeerConnectionImpl> pc = new PeerConnectionImpl(&aGlobal);
+
+  CSFLogDebug(logTag, "Created PeerConnection: %p", pc.get());
+
+  return pc.forget();
+}
+
+// Non-JS factory (unit-test / external-linkage path).
+// NOTE(review): returns a raw pointer from `new`; the caller is presumably
+// expected to take ownership (e.g. wrap in RefPtr) — confirm at call sites.
+PeerConnectionImpl* PeerConnectionImpl::CreatePeerConnection()
+{
+  PeerConnectionImpl *pc = new PeerConnectionImpl();
+
+  CSFLogDebug(logTag, "Created PeerConnection: %p", pc);
+
+  return pc;
+}
+
+// nsIProtocolProxyCallback: fired when the async proxy lookup started in
+// InitProxy() completes. Records the proxy (if any) on the owning
+// PeerConnectionMedia, marks resolution complete, and flushes any ICE
+// operations that were queued waiting for it. Always returns NS_OK.
+NS_IMETHODIMP PeerConnectionMedia::ProtocolProxyQueryHandler::
+OnProxyAvailable(nsICancelable *request,
+                 nsIChannel *aChannel,
+                 nsIProxyInfo *proxyinfo,
+                 nsresult result) {
+
+  if (!pcm_->mProxyRequest) {
+    // PeerConnectionMedia is no longer waiting
+    return NS_OK;
+  }
+
+  CSFLogInfo(logTag, "%s: Proxy Available: %d", __FUNCTION__, (int)result);
+
+  if (NS_SUCCEEDED(result) && proxyinfo) {
+    SetProxyOnPcm(*proxyinfo);
+  }
+
+  pcm_->mProxyResolveCompleted = true;
+  pcm_->mProxyRequest = nullptr;
+  pcm_->FlushIceCtxOperationQueueIfReady();
+
+  return NS_OK;
+}
+
+// Extracts host/port from the resolved proxy info and installs an
+// NrIceProxyServer on the owning PeerConnectionMedia, advertising the
+// "webrtc,c-webrtc" ALPN list. Failures are logged and swallowed (the PC
+// simply proceeds without a proxy).
+void
+PeerConnectionMedia::ProtocolProxyQueryHandler::SetProxyOnPcm(
+    nsIProxyInfo& proxyinfo)
+{
+  CSFLogInfo(logTag, "%s: Had proxyinfo", __FUNCTION__);
+  nsresult rv;
+  nsCString httpsProxyHost;
+  int32_t httpsProxyPort;
+
+  rv = proxyinfo.GetHost(httpsProxyHost);
+  if (NS_FAILED(rv)) {
+    CSFLogError(logTag, "%s: Failed to get proxy server host", __FUNCTION__);
+    return;
+  }
+
+  rv = proxyinfo.GetPort(&httpsProxyPort);
+  if (NS_FAILED(rv)) {
+    CSFLogError(logTag, "%s: Failed to get proxy server port", __FUNCTION__);
+    return;
+  }
+
+  if (pcm_->mIceCtxHdlr.get()) {
+    assert(httpsProxyPort >= 0 && httpsProxyPort < (1 << 16));
+    // Note that this could check if PrivacyRequested() is set on the PC and
+    // remove "webrtc" from the ALPN list.  But that would only work if the PC
+    // was constructed with a peerIdentity constraint, not when isolated
+    // streams are added.  If we ever need to signal to the proxy that the
+    // media is isolated, then we would need to restructure this code.
+    pcm_->mProxyServer.reset(
+      new NrIceProxyServer(httpsProxyHost.get(),
+                           static_cast<uint16_t>(httpsProxyPort),
+                           "webrtc,c-webrtc"));
+  } else {
+    CSFLogError(logTag, "%s: Failed to set proxy server (ICE ctx unavailable)",
+        __FUNCTION__);
+  }
+}
+
+NS_IMPL_ISUPPORTS(PeerConnectionMedia::ProtocolProxyQueryHandler, nsIProtocolProxyCallback)
+
+// Constructor: caches the parent PC's handle/name/threads so later calls do
+// not need to touch the parent, and starts with no ICE context (created in
+// Init()) and proxy resolution pending.
+PeerConnectionMedia::PeerConnectionMedia(PeerConnectionImpl *parent)
+    : mParent(parent),
+      mParentHandle(parent->GetHandle()),
+      mParentName(parent->GetName()),
+      mIceCtxHdlr(nullptr),
+      mDNSResolver(new NrIceResolver()),
+      mUuidGen(MakeUnique<PCUuidGenerator>()),
+      mMainThread(mParent->GetMainThread()),
+      mSTSThread(mParent->GetSTSThread()),
+      mProxyResolveCompleted(false),
+      mIceRestartState(ICE_RESTART_NONE) {
+}
+
+// Kicks off an asynchronous lookup of the browser's HTTPS proxy (if any) so
+// ICE can offer proxied TCP candidates. Uses a throwaway https://example.com
+// channel purely to ask the proxy service which proxy would apply; the result
+// arrives in ProtocolProxyQueryHandler::OnProxyAvailable. ICE gathering is
+// gated on mProxyResolveCompleted becoming true.
+nsresult
+PeerConnectionMedia::InitProxy()
+{
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+  // Allow mochitests to disable this, since mochitest configures a fake proxy
+  // that serves up content.
+  bool disable = Preferences::GetBool("media.peerconnection.disable_http_proxy",
+                                      false);
+  if (disable) {
+    mProxyResolveCompleted = true;
+    return NS_OK;
+  }
+#endif
+
+  nsresult rv;
+  nsCOMPtr<nsIProtocolProxyService> pps =
+    do_GetService(NS_PROTOCOLPROXYSERVICE_CONTRACTID, &rv);
+  if (NS_FAILED(rv)) {
+    CSFLogError(logTag, "%s: Failed to get proxy service: %d", __FUNCTION__, (int)rv);
+    return NS_ERROR_FAILURE;
+  }
+
+  // We use the following URL to find the "default" proxy address for all HTTPS
+  // connections. We will only attempt one HTTP(S) CONNECT per peer connection.
+  // "example.com" is guaranteed to be unallocated and should return the best default.
+  nsCOMPtr<nsIURI> fakeHttpsLocation;
+  rv = NS_NewURI(getter_AddRefs(fakeHttpsLocation), "https://example.com");
+  if (NS_FAILED(rv)) {
+    CSFLogError(logTag, "%s: Failed to set URI: %d", __FUNCTION__, (int)rv);
+    return NS_ERROR_FAILURE;
+  }
+
+  nsCOMPtr<nsIScriptSecurityManager> secMan(
+      do_GetService(NS_SCRIPTSECURITYMANAGER_CONTRACTID, &rv));
+  if (NS_FAILED(rv)) {
+    // NOTE(review): the "IOService" message below looks like a copy-paste
+    // leftover — this branch is about the security manager only.
+    CSFLogError(logTag, "%s: Failed to get IOService: %d",
+        __FUNCTION__, (int)rv);
+    CSFLogError(logTag, "%s: Failed to get securityManager: %d", __FUNCTION__, (int)rv);
+    return NS_ERROR_FAILURE;
+  }
+
+  nsCOMPtr<nsIPrincipal> systemPrincipal;
+  rv = secMan->GetSystemPrincipal(getter_AddRefs(systemPrincipal));
+  if (NS_FAILED(rv)) {
+    CSFLogError(logTag, "%s: Failed to get systemPrincipal: %d", __FUNCTION__, (int)rv);
+    return NS_ERROR_FAILURE;
+  }
+
+  nsCOMPtr<nsIChannel> channel;
+  rv = NS_NewChannel(getter_AddRefs(channel),
+                     fakeHttpsLocation,
+                     systemPrincipal,
+                     nsILoadInfo::SEC_ALLOW_CROSS_ORIGIN_DATA_IS_NULL,
+                     nsIContentPolicy::TYPE_OTHER);
+
+  if (NS_FAILED(rv)) {
+    CSFLogError(logTag, "%s: Failed to get channel from URI: %d",
+                __FUNCTION__, (int)rv);
+    return NS_ERROR_FAILURE;
+  }
+
+  // mProxyRequest doubles as the "still waiting" flag checked by the handler.
+  RefPtr<ProtocolProxyQueryHandler> handler = new ProtocolProxyQueryHandler(this);
+  rv = pps->AsyncResolve(channel,
+                         nsIProtocolProxyService::RESOLVE_PREFER_HTTPS_PROXY |
+                         nsIProtocolProxyService::RESOLVE_ALWAYS_TUNNEL,
+                         handler, getter_AddRefs(mProxyRequest));
+  if (NS_FAILED(rv)) {
+    CSFLogError(logTag, "%s: Failed to resolve protocol proxy: %d", __FUNCTION__, (int)rv);
+    return NS_ERROR_FAILURE;
+  }
+
+  return NS_OK;
+}
+
+// One-time setup: starts the proxy lookup, creates the ICE context handler
+// (honoring the ice.tcp and turn.disable prefs), installs STUN/TURN servers
+// and the DNS resolver, and connects ICE state-change signals.
+// Returns the first failing nsresult; NS_OK on success.
+nsresult PeerConnectionMedia::Init(const std::vector<NrIceStunServer>& stun_servers,
+                                   const std::vector<NrIceTurnServer>& turn_servers,
+                                   NrIceCtx::Policy policy)
+{
+  nsresult rv = InitProxy();
+  NS_ENSURE_SUCCESS(rv, rv);
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+  bool ice_tcp = Preferences::GetBool("media.peerconnection.ice.tcp", false);
+#else
+  bool ice_tcp = false;
+#endif
+
+  // TODO(ekr@rtfm.com): need some way to set not offerer later
+  // Looks like a bug in the NrIceCtx API.
+  mIceCtxHdlr = NrIceCtxHandler::Create("PC:" + mParentName,
+                                        true, // Offerer
+                                        mParent->GetAllowIceLoopback(),
+                                        ice_tcp,
+                                        mParent->GetAllowIceLinkLocal(),
+                                        policy);
+  if(!mIceCtxHdlr) {
+    CSFLogError(logTag, "%s: Failed to create Ice Context", __FUNCTION__);
+    return NS_ERROR_FAILURE;
+  }
+
+  if (NS_FAILED(rv = mIceCtxHdlr->ctx()->SetStunServers(stun_servers))) {
+    CSFLogError(logTag, "%s: Failed to set stun servers", __FUNCTION__);
+    return rv;
+  }
+  // Give us a way to globally turn off TURN support
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+  bool disabled = Preferences::GetBool("media.peerconnection.turn.disable", false);
+#else
+  bool disabled = false;
+#endif
+  if (!disabled) {
+    if (NS_FAILED(rv = mIceCtxHdlr->ctx()->SetTurnServers(turn_servers))) {
+      CSFLogError(logTag, "%s: Failed to set turn servers", __FUNCTION__);
+      return rv;
+    }
+  } else if (turn_servers.size() != 0) {
+    // Pref says no TURN, but the app supplied servers — log so it's visible.
+    CSFLogError(logTag, "%s: Setting turn servers disabled", __FUNCTION__);
+  }
+  if (NS_FAILED(rv = mDNSResolver->Init())) {
+    CSFLogError(logTag, "%s: Failed to initialize dns resolver", __FUNCTION__);
+    return rv;
+  }
+  if (NS_FAILED(rv =
+      mIceCtxHdlr->ctx()->SetResolver(mDNSResolver->AllocateResolver()))) {
+    CSFLogError(logTag, "%s: Failed to get dns resolver", __FUNCTION__);
+    return rv;
+  }
+  ConnectSignals(mIceCtxHdlr->ctx().get());
+
+  return NS_OK;
+}
+
+// For each JSEP transport, dispatches creation of the corresponding ICE
+// media stream to the STS thread (index i == m-line level), then triggers
+// candidate gathering once prerequisites (e.g. proxy resolution) are met.
+void
+PeerConnectionMedia::EnsureTransports(const JsepSession& aSession)
+{
+  auto transports = aSession.GetTransports();
+  for (size_t i = 0; i < transports.size(); ++i) {
+    RefPtr<JsepTransport> transport = transports[i];
+    RUN_ON_THREAD(
+        GetSTSThread(),
+        WrapRunnable(RefPtr<PeerConnectionMedia>(this),
+                     &PeerConnectionMedia::EnsureTransport_s,
+                     i,
+                     transport->mComponents),
+        NS_DISPATCH_NORMAL);
+  }
+
+  GatherIfReady();
+}
+
+// STS thread. Creates the ICE media stream for m-line `aLevel` if one does
+// not already exist, wiring up ready/candidate signals. Failure to create a
+// stream is logged and silently dropped.
+// NOTE(review): the inner `stream` declaration shadows the (null) outer one;
+// harmless here since all later uses are inside the if-block.
+void
+PeerConnectionMedia::EnsureTransport_s(size_t aLevel, size_t aComponentCount)
+{
+  RefPtr<NrIceMediaStream> stream(mIceCtxHdlr->ctx()->GetStream(aLevel));
+  if (!stream) {
+    CSFLogDebug(logTag, "%s: Creating ICE media stream=%u components=%u",
+                mParentHandle.c_str(),
+                static_cast<unsigned>(aLevel),
+                static_cast<unsigned>(aComponentCount));
+
+    std::ostringstream os;
+    os << mParentName << " aLevel=" << aLevel;
+    RefPtr<NrIceMediaStream> stream =
+      mIceCtxHdlr->CreateStream(os.str().c_str(),
+                                aComponentCount);
+
+    if (!stream) {
+      CSFLogError(logTag, "Failed to create ICE stream.");
+      return;
+    }
+
+    stream->SetLevel(aLevel);
+    stream->SignalReady.connect(this, &PeerConnectionMedia::IceStreamReady_s);
+    stream->SignalCandidate.connect(this,
+                                    &PeerConnectionMedia::OnCandidateFound_s);
+    mIceCtxHdlr->ctx()->SetStream(aLevel, stream);
+  }
+}
+
+// After negotiation: for each JSEP transport, either activates its ICE
+// stream with the negotiated ufrag/pwd/candidates or (for disabled/bundled
+// transports) tears down its flows. All ICE-stream work is dispatched to the
+// STS thread; surplus streams beyond the m-line count (possible after
+// rollback) are removed at the end.
+void
+PeerConnectionMedia::ActivateOrRemoveTransports(const JsepSession& aSession)
+{
+  auto transports = aSession.GetTransports();
+  for (size_t i = 0; i < transports.size(); ++i) {
+    RefPtr<JsepTransport> transport = transports[i];
+
+    std::string ufrag;
+    std::string pwd;
+    std::vector<std::string> candidates;
+
+    if (transport->mComponents) {
+      MOZ_ASSERT(transport->mIce);
+      CSFLogDebug(logTag, "Transport %u is active", static_cast<unsigned>(i));
+      ufrag = transport->mIce->GetUfrag();
+      pwd = transport->mIce->GetPassword();
+      candidates = transport->mIce->GetCandidates();
+    } else {
+      CSFLogDebug(logTag, "Transport %u is disabled", static_cast<unsigned>(i));
+      // Make sure the MediaPipelineFactory doesn't try to use these.
+      RemoveTransportFlow(i, false);
+      RemoveTransportFlow(i, true);
+    }
+
+    RUN_ON_THREAD(
+        GetSTSThread(),
+        WrapRunnable(RefPtr<PeerConnectionMedia>(this),
+                     &PeerConnectionMedia::ActivateOrRemoveTransport_s,
+                     i,
+                     transport->mComponents,
+                     ufrag,
+                     pwd,
+                     candidates),
+        NS_DISPATCH_NORMAL);
+  }
+
+  // We can have more streams than m-lines due to rollback.
+  RUN_ON_THREAD(
+      GetSTSThread(),
+      WrapRunnable(RefPtr<PeerConnectionMedia>(this),
+                   &PeerConnectionMedia::RemoveTransportsAtOrAfter_s,
+                   transports.size()),
+      NS_DISPATCH_NORMAL);
+}
+
+// STS thread. Zero components => drop the ICE stream for this m-line.
+// Otherwise, on first activation, feed the remote ufrag/pwd/candidates into
+// the stream as SDP-style attributes and disable any components beyond the
+// negotiated count (e.g. RTCP component when rtcp-mux is in effect).
+void
+PeerConnectionMedia::ActivateOrRemoveTransport_s(
+    size_t aMLine,
+    size_t aComponentCount,
+    const std::string& aUfrag,
+    const std::string& aPassword,
+    const std::vector<std::string>& aCandidateList) {
+
+  if (!aComponentCount) {
+    CSFLogDebug(logTag, "%s: Removing ICE media stream=%u",
+                mParentHandle.c_str(),
+                static_cast<unsigned>(aMLine));
+    mIceCtxHdlr->ctx()->SetStream(aMLine, nullptr);
+    return;
+  }
+
+  RefPtr<NrIceMediaStream> stream(mIceCtxHdlr->ctx()->GetStream(aMLine));
+  if (!stream) {
+    MOZ_ASSERT(false);
+    return;
+  }
+
+  // HasParsedAttributes() guards against re-activating on renegotiation.
+  if (!stream->HasParsedAttributes()) {
+    CSFLogDebug(logTag, "%s: Activating ICE media stream=%u components=%u",
+                mParentHandle.c_str(),
+                static_cast<unsigned>(aMLine),
+                static_cast<unsigned>(aComponentCount));
+
+    std::vector<std::string> attrs;
+    for (auto i = aCandidateList.begin(); i != aCandidateList.end(); ++i) {
+      attrs.push_back("candidate:" + *i);
+    }
+    attrs.push_back("ice-ufrag:" + aUfrag);
+    attrs.push_back("ice-pwd:" + aPassword);
+
+    nsresult rv = stream->ParseAttributes(attrs);
+    if (NS_FAILED(rv)) {
+      CSFLogError(logTag, "Couldn't parse ICE attributes, rv=%u",
+                          static_cast<unsigned>(rv));
+    }
+
+    for (size_t c = aComponentCount; c < stream->components(); ++c) {
+      // components are 1-indexed
+      stream->DisableComponent(c + 1);
+    }
+  }
+}
+
+// Drops every ICE media stream at index aMLine and above (STS thread).
+// Used after negotiation since rollback can leave more streams than m-lines.
+void
+PeerConnectionMedia::RemoveTransportsAtOrAfter_s(size_t aMLine)
+{
+  for (size_t i = aMLine; i < mIceCtxHdlr->ctx()->GetStreamCount(); ++i) {
+    mIceCtxHdlr->ctx()->SetStream(i, nullptr);
+  }
+}
+
+// Creates or updates a media pipeline for each negotiated track (receive
+// and/or send side of every track pair), then starts receiving on all
+// remote source streams. Returns the first pipeline-creation failure.
+nsresult PeerConnectionMedia::UpdateMediaPipelines(
+    const JsepSession& session) {
+  auto trackPairs = session.GetNegotiatedTrackPairs();
+  MediaPipelineFactory factory(this);
+  nsresult rv;
+
+  for (auto i = trackPairs.begin(); i != trackPairs.end(); ++i) {
+    JsepTrackPair pair = *i;
+
+    if (pair.mReceiving) {
+      rv = factory.CreateOrUpdateMediaPipeline(pair, *pair.mReceiving);
+      if (NS_FAILED(rv)) {
+        return rv;
+      }
+    }
+
+    if (pair.mSending) {
+      rv = factory.CreateOrUpdateMediaPipeline(pair, *pair.mSending);
+      if (NS_FAILED(rv)) {
+        return rv;
+      }
+    }
+  }
+
+  for (auto& stream : mRemoteSourceStreams) {
+    stream->StartReceiving();
+  }
+
+  return NS_OK;
+}
+
+// Queues the start of ICE connectivity checks; the work is deferred until
+// the ICE context is ready (see PerformOrEnqueueIceCtxOperation).
+void
+PeerConnectionMedia::StartIceChecks(const JsepSession& aSession)
+{
+  nsCOMPtr<nsIRunnable> runnable(
+      WrapRunnable(
+          RefPtr<PeerConnectionMedia>(this),
+          &PeerConnectionMedia::StartIceChecks_s,
+          aSession.IsIceControlling(),
+          aSession.RemoteIsIceLite(),
+          // Copy, just in case API changes to return a ref
+          std::vector<std::string>(aSession.GetIceOptions())));
+
+  PerformOrEnqueueIceCtxOperation(runnable);
+}
+
+// STS-side worker for StartIceChecks: feeds the global ice-lite/ice-options
+// attributes to the ICE context, sets the controlling role, and starts
+// connectivity checks.
+void
+PeerConnectionMedia::StartIceChecks_s(
+    bool aIsControlling,
+    bool aIsIceLite,
+    const std::vector<std::string>& aIceOptionsList) {
+
+  CSFLogDebug(logTag, "Starting ICE Checking");
+
+  std::vector<std::string> attributes;
+  if (aIsIceLite) {
+    attributes.push_back("ice-lite");
+  }
+
+  if (!aIceOptionsList.empty()) {
+    attributes.push_back("ice-options:");
+    // Each option is appended with a trailing space separator.
+    for (auto i = aIceOptionsList.begin(); i != aIceOptionsList.end(); ++i) {
+      attributes.back() += *i + ' ';
+    }
+  }
+
+  nsresult rv = mIceCtxHdlr->ctx()->ParseGlobalAttributes(attributes);
+  if (NS_FAILED(rv)) {
+    CSFLogError(logTag, "%s: couldn't parse global parameters", __FUNCTION__ );
+  }
+
+  mIceCtxHdlr->ctx()->SetControlling(aIsControlling ?
+                                     NrIceCtx::ICE_CONTROLLING :
+                                     NrIceCtx::ICE_CONTROLLED);
+
+  mIceCtxHdlr->ctx()->StartChecks();
+}
+
+// True while an ICE restart is provisional or committed (main thread only).
+bool
+PeerConnectionMedia::IsIceRestarting() const
+{
+  ASSERT_ON_THREAD(mMainThread);
+
+  return (mIceRestartState != ICE_RESTART_NONE);
+}
+
+// Returns the current ICE restart state (main thread only).
+PeerConnectionMedia::IceRestartState
+PeerConnectionMedia::GetIceRestartState() const
+{
+  ASSERT_ON_THREAD(mMainThread);
+
+  return mIceRestartState;
+}
+
+// Starts an ICE restart: creates a fresh ICE context with the new
+// ufrag/pwd, installs it on the STS thread, and marks the restart
+// provisional until CommitIceRestart/RollbackIceRestart. No-op if a
+// restart is already in progress. Main thread only.
+void
+PeerConnectionMedia::BeginIceRestart(const std::string& ufrag,
+                                     const std::string& pwd)
+{
+  ASSERT_ON_THREAD(mMainThread);
+  if (IsIceRestarting()) {
+    return;
+  }
+
+  RefPtr<NrIceCtx> new_ctx = mIceCtxHdlr->CreateCtx(ufrag, pwd);
+
+  RUN_ON_THREAD(GetSTSThread(),
+                WrapRunnable(
+                    RefPtr<PeerConnectionMedia>(this),
+                    &PeerConnectionMedia::BeginIceRestart_s,
+                    new_ctx),
+                NS_DISPATCH_NORMAL);
+
+  mIceRestartState = ICE_RESTART_PROVISIONAL;
+}
+
+// STS-side worker: swaps the new context into the handler and, if that
+// succeeds, moves our signal connections from the old context to the new one.
+void
+PeerConnectionMedia::BeginIceRestart_s(RefPtr<NrIceCtx> new_ctx)
+{
+  ASSERT_ON_THREAD(mSTSThread);
+
+  // hold the original context so we can disconnect signals if needed
+  RefPtr<NrIceCtx> originalCtx = mIceCtxHdlr->ctx();
+
+  if (mIceCtxHdlr->BeginIceRestart(new_ctx)) {
+    ConnectSignals(mIceCtxHdlr->ctx().get(), originalCtx.get());
+  }
+}
+
+// Promotes a provisional ICE restart to committed (main thread only).
+// Does nothing unless the restart is currently provisional.
+void
+PeerConnectionMedia::CommitIceRestart()
+{
+  ASSERT_ON_THREAD(mMainThread);
+  if (mIceRestartState != ICE_RESTART_PROVISIONAL) {
+    return;
+  }
+
+  mIceRestartState = ICE_RESTART_COMMITTED;
+}
+
+// Completes an in-progress ICE restart: kicks the STS-side cleanup and
+// clears the restart state. Main thread only.
+void
+PeerConnectionMedia::FinalizeIceRestart()
+{
+  ASSERT_ON_THREAD(mMainThread);
+  if (!IsIceRestarting()) {
+    return;
+  }
+
+  RUN_ON_THREAD(GetSTSThread(),
+                WrapRunnable(
+                    RefPtr<PeerConnectionMedia>(this),
+                    &PeerConnectionMedia::FinalizeIceRestart_s),
+                NS_DISPATCH_NORMAL);
+
+  mIceRestartState = ICE_RESTART_NONE;
+}
+
+// STS-side finalize: drops the pre-restart ICE streams held by each
+// transport flow's ICE layer, then tells the handler to finalize.
+void
+PeerConnectionMedia::FinalizeIceRestart_s()
+{
+  ASSERT_ON_THREAD(mSTSThread);
+
+  // reset old streams since we don't need them anymore
+  for (auto i = mTransportFlows.begin();
+       i != mTransportFlows.end();
+       ++i) {
+    RefPtr<TransportFlow> aFlow = i->second;
+    if (!aFlow) continue;
+    TransportLayerIce* ice =
+      static_cast<TransportLayerIce*>(aFlow->GetLayer(TransportLayerIce::ID()));
+    ice->ResetOldStream();
+  }
+
+  mIceCtxHdlr->FinalizeIceRestart();
+}
+
+// Abandons a provisional ICE restart (main thread only); kicks the
+// STS-side rollback and clears the restart state. Does nothing unless the
+// restart is currently provisional.
+void
+PeerConnectionMedia::RollbackIceRestart()
+{
+  ASSERT_ON_THREAD(mMainThread);
+  if (mIceRestartState != ICE_RESTART_PROVISIONAL) {
+    return;
+  }
+
+  RUN_ON_THREAD(GetSTSThread(),
+                WrapRunnable(
+                    RefPtr<PeerConnectionMedia>(this),
+                    &PeerConnectionMedia::RollbackIceRestart_s),
+                NS_DISPATCH_NORMAL);
+
+  mIceRestartState = ICE_RESTART_NONE;
+}
+
+// STS-side rollback: restores each flow's pre-restart ICE stream, rolls the
+// handler back to the original context, and moves our signal connections
+// from the abandoned restart context back onto it.
+void
+PeerConnectionMedia::RollbackIceRestart_s()
+{
+  ASSERT_ON_THREAD(mSTSThread);
+
+  // hold the restart context so we can disconnect signals
+  RefPtr<NrIceCtx> restartCtx = mIceCtxHdlr->ctx();
+
+  // restore old streams since we're rolling back
+  for (auto i = mTransportFlows.begin();
+       i != mTransportFlows.end();
+       ++i) {
+    RefPtr<TransportFlow> aFlow = i->second;
+    if (!aFlow) continue;
+    TransportLayerIce* ice =
+      static_cast<TransportLayerIce*>(aFlow->GetLayer(TransportLayerIce::ID()));
+    ice->RestoreOldStream();
+  }
+
+  mIceCtxHdlr->RollbackIceRestart();
+  ConnectSignals(mIceCtxHdlr->ctx().get(), restartCtx.get());
+}
+
+// Reads whether ICE gathering should expose only the default address.
+// The pref is forced on when the window is not actively capturing and has
+// no capture permission. Main thread only (reads prefs/window).
+bool
+PeerConnectionMedia::GetPrefDefaultAddressOnly() const
+{
+  ASSERT_ON_THREAD(mMainThread); // will crash on STS thread
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+  uint64_t winId = mParent->GetWindow()->WindowID();
+
+  bool default_address_only = Preferences::GetBool(
+    "media.peerconnection.ice.default_address_only", false);
+  default_address_only |=
+    !MediaManager::Get()->IsActivelyCapturingOrHasAPermission(winId);
+#else
+  bool default_address_only = true;
+#endif
+  return default_address_only;
+}
+
+// Reads the "proxy only" ICE pref; always false for external linkage builds.
+// Main thread only.
+bool
+PeerConnectionMedia::GetPrefProxyOnly() const
+{
+  ASSERT_ON_THREAD(mMainThread); // will crash on STS thread
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+  return Preferences::GetBool("media.peerconnection.ice.proxy_only", false);
+#else
+  return false;
+#endif
+}
+
+// Hooks this object up to aCtx's gathering/connection state-change signals.
+// When replacing an old context (aOldCtx non-null), disconnects from it and
+// re-fires any state that differs so observers catch up.
+void
+PeerConnectionMedia::ConnectSignals(NrIceCtx *aCtx, NrIceCtx *aOldCtx)
+{
+  aCtx->SignalGatheringStateChange.connect(
+      this,
+      &PeerConnectionMedia::IceGatheringStateChange_s);
+  aCtx->SignalConnectionStateChange.connect(
+      this,
+      &PeerConnectionMedia::IceConnectionStateChange_s);
+
+  if (aOldCtx) {
+    MOZ_ASSERT(aCtx != aOldCtx);
+    aOldCtx->SignalGatheringStateChange.disconnect(this);
+    aOldCtx->SignalConnectionStateChange.disconnect(this);
+
+    // if the old and new connection state and/or gathering state is
+    // different fire the state update.  Note: we don't fire the update
+    // if the state is *INIT since updates for the INIT state aren't
+    // sent during the normal flow. (mjf)
+    if (aOldCtx->connection_state() != aCtx->connection_state() &&
+        aCtx->connection_state() != NrIceCtx::ICE_CTX_INIT) {
+      aCtx->SignalConnectionStateChange(aCtx, aCtx->connection_state());
+    }
+
+    if (aOldCtx->gathering_state() != aCtx->gathering_state() &&
+        aCtx->gathering_state() != NrIceCtx::ICE_CTX_GATHER_INIT) {
+      aCtx->SignalGatheringStateChange(aCtx, aCtx->gathering_state());
+    }
+  }
+}
+
+// Forwards a remote trickle candidate to the STS thread (strings copied so
+// the runnable owns its data).
+void
+PeerConnectionMedia::AddIceCandidate(const std::string& candidate,
+                                     const std::string& mid,
+                                     uint32_t aMLine) {
+  RUN_ON_THREAD(GetSTSThread(),
+                WrapRunnable(
+                    RefPtr<PeerConnectionMedia>(this),
+                    &PeerConnectionMedia::AddIceCandidate_s,
+                    std::string(candidate), // Make copies.
+                    std::string(mid),
+                    aMLine),
+                NS_DISPATCH_NORMAL);
+}
+
+// STS-side worker: hands the trickle candidate to the ICE stream for the
+// given m-line; logs and bails if the stream is missing or parsing fails.
+void
+PeerConnectionMedia::AddIceCandidate_s(const std::string& aCandidate,
+                                       const std::string& aMid,
+                                       uint32_t aMLine) {
+  RefPtr<NrIceMediaStream> stream(mIceCtxHdlr->ctx()->GetStream(aMLine));
+  if (!stream) {
+    CSFLogError(logTag, "No ICE stream for candidate at level %u: %s",
+                static_cast<unsigned>(aMLine), aCandidate.c_str());
+    return;
+  }
+
+  nsresult rv = stream->ParseTrickleCandidate(aCandidate);
+  if (NS_FAILED(rv)) {
+    CSFLogError(logTag, "Couldn't process ICE candidate at level %u",
+                static_cast<unsigned>(aMLine));
+    return;
+  }
+}
+
+// Dispatches every queued ICE-context operation to STS once the context is
+// ready, then empties the queue. Main thread only.
+void
+PeerConnectionMedia::FlushIceCtxOperationQueueIfReady()
+{
+  ASSERT_ON_THREAD(mMainThread);
+
+  if (IsIceCtxReady()) {
+    for (auto i = mQueuedIceCtxOperations.begin();
+         i != mQueuedIceCtxOperations.end();
+         ++i) {
+      GetSTSThread()->Dispatch(*i, NS_DISPATCH_NORMAL);
+    }
+    mQueuedIceCtxOperations.clear();
+  }
+}
+
+// Runs the operation on STS immediately if the ICE context is ready,
+// otherwise queues it for FlushIceCtxOperationQueueIfReady. Main thread only.
+void
+PeerConnectionMedia::PerformOrEnqueueIceCtxOperation(nsIRunnable* runnable)
+{
+  ASSERT_ON_THREAD(mMainThread);
+
+  if (IsIceCtxReady()) {
+    GetSTSThread()->Dispatch(runnable, NS_DISPATCH_NORMAL);
+  } else {
+    mQueuedIceCtxOperations.push_back(runnable);
+  }
+}
+
+// Requests candidate gathering, reading the address/proxy prefs on main
+// before hopping (or queueing the hop) to STS.
+void
+PeerConnectionMedia::GatherIfReady() {
+  ASSERT_ON_THREAD(mMainThread);
+
+  nsCOMPtr<nsIRunnable> runnable(WrapRunnable(
+        RefPtr<PeerConnectionMedia>(this),
+        &PeerConnectionMedia::EnsureIceGathering_s,
+        GetPrefDefaultAddressOnly(),
+        GetPrefProxyOnly()));
+
+  PerformOrEnqueueIceCtxOperation(runnable);
+}
+
+// STS-side gathering kickoff. Configures the proxy if one was detected;
+// with proxy-only and no proxy available, reports gathering complete
+// immediately. Gathering only starts if at least one ICE stream exists.
+void
+PeerConnectionMedia::EnsureIceGathering_s(bool aDefaultRouteOnly,
+                                          bool aProxyOnly) {
+  if (mProxyServer) {
+    mIceCtxHdlr->ctx()->SetProxyServer(*mProxyServer);
+  } else if (aProxyOnly) {
+    IceGatheringStateChange_s(mIceCtxHdlr->ctx().get(),
+                              NrIceCtx::ICE_CTX_GATHER_COMPLETE);
+    return;
+  }
+
+  // Start gathering, but only if there are streams
+  for (size_t i = 0; i < mIceCtxHdlr->ctx()->GetStreamCount(); ++i) {
+    if (mIceCtxHdlr->ctx()->GetStream(i)) {
+      mIceCtxHdlr->ctx()->StartGathering(aDefaultRouteOnly, aProxyOnly);
+      return;
+    }
+  }
+
+  // If there are no streams, we're probably in a situation where we've rolled
+  // back while still waiting for our proxy configuration to come back. Make
+  // sure content knows that the rollback has stuck wrt gathering.
+  IceGatheringStateChange_s(mIceCtxHdlr->ctx().get(),
+                            NrIceCtx::ICE_CTX_GATHER_COMPLETE);
+}
+
+// Registers a local track under the given stream id, creating the
+// LocalSourceStreamInfo on first use. Main thread only.
+nsresult
+PeerConnectionMedia::AddTrack(DOMMediaStream& aMediaStream,
+                              const std::string& streamId,
+                              MediaStreamTrack& aTrack,
+                              const std::string& trackId)
+{
+  ASSERT_ON_THREAD(mMainThread);
+
+  CSFLogDebug(logTag, "%s: MediaStream: %p", __FUNCTION__, &aMediaStream);
+
+  RefPtr<LocalSourceStreamInfo> localSourceStream =
+    GetLocalStreamById(streamId);
+
+  if (!localSourceStream) {
+    localSourceStream = new LocalSourceStreamInfo(&aMediaStream, this, streamId);
+    mLocalSourceStreams.AppendElement(localSourceStream);
+  }
+
+  localSourceStream->AddTrack(trackId, &aTrack);
+  return NS_OK;
+}
+
+// Removes a local track; drops the whole stream info once its last track is
+// gone. Returns NS_ERROR_ILLEGAL_VALUE for an unknown stream id. Main
+// thread only.
+nsresult
+PeerConnectionMedia::RemoveLocalTrack(const std::string& streamId,
+                                      const std::string& trackId)
+{
+  ASSERT_ON_THREAD(mMainThread);
+
+  CSFLogDebug(logTag, "%s: stream: %s track: %s", __FUNCTION__,
+              streamId.c_str(), trackId.c_str());
+
+  RefPtr<LocalSourceStreamInfo> localSourceStream =
+    GetLocalStreamById(streamId);
+  if (!localSourceStream) {
+    return NS_ERROR_ILLEGAL_VALUE;
+  }
+
+  localSourceStream->RemoveTrack(trackId);
+  if (!localSourceStream->GetTrackCount()) {
+    mLocalSourceStreams.RemoveElement(localSourceStream);
+  }
+  return NS_OK;
+}
+
+// Remote-side counterpart of RemoveLocalTrack; same semantics. Main thread
+// only.
+nsresult
+PeerConnectionMedia::RemoveRemoteTrack(const std::string& streamId,
+                                       const std::string& trackId)
+{
+  ASSERT_ON_THREAD(mMainThread);
+
+  CSFLogDebug(logTag, "%s: stream: %s track: %s", __FUNCTION__,
+              streamId.c_str(), trackId.c_str());
+
+  RefPtr<RemoteSourceStreamInfo> remoteSourceStream =
+    GetRemoteStreamById(streamId);
+  if (!remoteSourceStream) {
+    return NS_ERROR_ILLEGAL_VALUE;
+  }
+
+  remoteSourceStream->RemoveTrack(trackId);
+  if (!remoteSourceStream->GetTrackCount()) {
+    mRemoteSourceStreams.RemoveElement(remoteSourceStream);
+  }
+  return NS_OK;
+}
+
+// Begins teardown (main thread): detaches media from every stream info,
+// cancels any outstanding proxy request, then asynchronously shuts down the
+// transport on STS. Final release happens in SelfDestruct_m.
+void
+PeerConnectionMedia::SelfDestruct()
+{
+  ASSERT_ON_THREAD(mMainThread);
+
+  CSFLogDebug(logTag, "%s: ", __FUNCTION__);
+
+  // Shut down the media
+  for (uint32_t i=0; i < mLocalSourceStreams.Length(); ++i) {
+    mLocalSourceStreams[i]->DetachMedia_m();
+  }
+
+  for (uint32_t i=0; i < mRemoteSourceStreams.Length(); ++i) {
+    mRemoteSourceStreams[i]->DetachMedia_m();
+  }
+
+  if (mProxyRequest) {
+    mProxyRequest->Cancel(NS_ERROR_ABORT);
+    mProxyRequest = nullptr;
+  }
+
+  // Shutdown the transport (async)
+  RUN_ON_THREAD(mSTSThread, WrapRunnable(
+      this, &PeerConnectionMedia::ShutdownMediaTransport_s),
+                NS_DISPATCH_NORMAL);
+
+  CSFLogDebug(logTag, "%s: Media shut down", __FUNCTION__);
+}
+
+// Last stage of teardown, dispatched back from STS: clears the stream
+// lists and drops the self-reference taken at construction.
+void
+PeerConnectionMedia::SelfDestruct_m()
+{
+  CSFLogDebug(logTag, "%s: ", __FUNCTION__);
+
+  ASSERT_ON_THREAD(mMainThread);
+
+  mLocalSourceStreams.Clear();
+  mRemoteSourceStreams.Clear();
+
+  mMainThread = nullptr;
+
+  // Final self-destruct.
+  this->Release();
+}
+
+// STS stage of teardown: detaches transports, disconnects all sigslot
+// signals, drops transport flows and the ICE context handler, then bounces
+// back to main for SelfDestruct_m.
+void
+PeerConnectionMedia::ShutdownMediaTransport_s()
+{
+  ASSERT_ON_THREAD(mSTSThread);
+
+  CSFLogDebug(logTag, "%s: ", __FUNCTION__);
+
+  // Here we access m{Local|Remote}SourceStreams off the main thread.
+  // That's OK because by here PeerConnectionImpl has forgotten about us,
+  // so there is no chance of getting a call in here from outside.
+  // The dispatches from SelfDestruct() and to SelfDestruct_m() provide
+  // memory barriers that protect us from badness.
+  for (uint32_t i=0; i < mLocalSourceStreams.Length(); ++i) {
+    mLocalSourceStreams[i]->DetachTransport_s();
+  }
+
+  for (uint32_t i=0; i < mRemoteSourceStreams.Length(); ++i) {
+    mRemoteSourceStreams[i]->DetachTransport_s();
+  }
+
+  disconnect_all();
+  mTransportFlows.clear();
+  mIceCtxHdlr = nullptr;
+
+  mMainThread->Dispatch(WrapRunnable(this, &PeerConnectionMedia::SelfDestruct_m),
+                        NS_DISPATCH_NORMAL);
+}
+
+// Returns the local stream info at aIndex, or nullptr when out of range.
+// Main thread only.
+LocalSourceStreamInfo*
+PeerConnectionMedia::GetLocalStreamByIndex(int aIndex)
+{
+  ASSERT_ON_THREAD(mMainThread);
+  if(aIndex < 0 || aIndex >= (int) mLocalSourceStreams.Length()) {
+    return nullptr;
+  }
+
+  MOZ_ASSERT(mLocalSourceStreams[aIndex]);
+  return mLocalSourceStreams[aIndex];
+}
+
+// Linear search for a local stream info by stream id; nullptr if absent.
+// Main thread only.
+LocalSourceStreamInfo*
+PeerConnectionMedia::GetLocalStreamById(const std::string& id)
+{
+  ASSERT_ON_THREAD(mMainThread);
+  for (size_t i = 0; i < mLocalSourceStreams.Length(); ++i) {
+    if (id == mLocalSourceStreams[i]->GetId()) {
+      return mLocalSourceStreams[i];
+    }
+  }
+
+  return nullptr;
+}
+
+// Linear search for the local stream info that contains the given track id;
+// nullptr if absent. Main thread only.
+LocalSourceStreamInfo*
+PeerConnectionMedia::GetLocalStreamByTrackId(const std::string& id)
+{
+  ASSERT_ON_THREAD(mMainThread);
+  for (RefPtr<LocalSourceStreamInfo>& info : mLocalSourceStreams) {
+    if (info->HasTrack(id)) {
+      return info;
+    }
+  }
+
+  return nullptr;
+}
+
+// Returns the remote stream info at aIndex (SafeElementAt yields nullptr
+// when out of range, though that case is asserted). Main thread only.
+RemoteSourceStreamInfo*
+PeerConnectionMedia::GetRemoteStreamByIndex(size_t aIndex)
+{
+  ASSERT_ON_THREAD(mMainThread);
+  MOZ_ASSERT(mRemoteSourceStreams.SafeElementAt(aIndex));
+  return mRemoteSourceStreams.SafeElementAt(aIndex);
+}
+
+// Linear search for a remote stream info by stream id; nullptr if absent.
+// Main thread only.
+RemoteSourceStreamInfo*
+PeerConnectionMedia::GetRemoteStreamById(const std::string& id)
+{
+  ASSERT_ON_THREAD(mMainThread);
+  for (size_t i = 0; i < mRemoteSourceStreams.Length(); ++i) {
+    if (id == mRemoteSourceStreams[i]->GetId()) {
+      return mRemoteSourceStreams[i];
+    }
+  }
+
+  return nullptr;
+}
+
+// Linear search for the remote stream info that contains the given track id;
+// nullptr if absent. Main thread only.
+RemoteSourceStreamInfo*
+PeerConnectionMedia::GetRemoteStreamByTrackId(const std::string& id)
+{
+  ASSERT_ON_THREAD(mMainThread);
+  for (RefPtr<RemoteSourceStreamInfo>& info : mRemoteSourceStreams) {
+    if (info->HasTrack(id)) {
+      return info;
+    }
+  }
+
+  return nullptr;
+}
+
+
+// Appends a remote stream info to the list. Main thread only.
+nsresult
+PeerConnectionMedia::AddRemoteStream(RefPtr<RemoteSourceStreamInfo> aInfo)
+{
+  ASSERT_ON_THREAD(mMainThread);
+
+  mRemoteSourceStreams.AppendElement(aInfo);
+
+  return NS_OK;
+}
+
+// ICE gathering state signal handler (STS thread). On gather-complete,
+// fires EndOfLocalCandidates with the default candidate addresses for every
+// stream, then forwards the state change to the main thread.
+void
+PeerConnectionMedia::IceGatheringStateChange_s(NrIceCtx* ctx,
+                                               NrIceCtx::GatheringState state)
+{
+  ASSERT_ON_THREAD(mSTSThread);
+
+  if (state == NrIceCtx::ICE_CTX_GATHER_COMPLETE) {
+    // Fire off EndOfLocalCandidates for each stream
+    for (size_t i = 0; ; ++i) {
+      RefPtr<NrIceMediaStream> stream(ctx->GetStream(i));
+      if (!stream) {
+        break;
+      }
+
+      NrIceCandidate candidate;
+      NrIceCandidate rtcpCandidate;
+      GetDefaultCandidates(*stream, &candidate, &rtcpCandidate);
+      EndOfLocalCandidates(candidate.cand_addr.host,
+                           candidate.cand_addr.port,
+                           rtcpCandidate.cand_addr.host,
+                           rtcpCandidate.cand_addr.port,
+                           i);
+    }
+  }
+
+  // ShutdownMediaTransport_s has not run yet because it unhooks this function
+  // from its signal, which means that SelfDestruct_m has not been dispatched
+  // yet either, so this PCMedia will still be around when this dispatch reaches
+  // main.
+  GetMainThread()->Dispatch(
+      WrapRunnable(this,
+                   &PeerConnectionMedia::IceGatheringStateChange_m,
+                   ctx,
+                   state),
+      NS_DISPATCH_NORMAL);
+}
+
+// ICE connection state signal handler (STS thread); just forwards the state
+// change to the main thread.
+void
+PeerConnectionMedia::IceConnectionStateChange_s(NrIceCtx* ctx,
+                                                NrIceCtx::ConnectionState state)
+{
+  ASSERT_ON_THREAD(mSTSThread);
+  // ShutdownMediaTransport_s has not run yet because it unhooks this function
+  // from its signal, which means that SelfDestruct_m has not been dispatched
+  // yet either, so this PCMedia will still be around when this dispatch reaches
+  // main.
+  GetMainThread()->Dispatch(
+      WrapRunnable(this,
+                   &PeerConnectionMedia::IceConnectionStateChange_m,
+                   ctx,
+                   state),
+      NS_DISPATCH_NORMAL);
+}
+
+// New local candidate signal handler (STS thread): captures the stream's
+// current default candidates and forwards everything to the main thread.
+void
+PeerConnectionMedia::OnCandidateFound_s(NrIceMediaStream *aStream,
+                                        const std::string &aCandidateLine)
+{
+  ASSERT_ON_THREAD(mSTSThread);
+  MOZ_ASSERT(aStream);
+  MOZ_RELEASE_ASSERT(mIceCtxHdlr);
+
+  CSFLogDebug(logTag, "%s: %s", __FUNCTION__, aStream->name().c_str());
+
+  NrIceCandidate candidate;
+  NrIceCandidate rtcpCandidate;
+  GetDefaultCandidates(*aStream, &candidate, &rtcpCandidate);
+
+  // ShutdownMediaTransport_s has not run yet because it unhooks this function
+  // from its signal, which means that SelfDestruct_m has not been dispatched
+  // yet either, so this PCMedia will still be around when this dispatch reaches
+  // main.
+  GetMainThread()->Dispatch(
+      WrapRunnable(this,
+                   &PeerConnectionMedia::OnCandidateFound_m,
+                   aCandidateLine,
+                   candidate.cand_addr.host,
+                   candidate.cand_addr.port,
+                   rtcpCandidate.cand_addr.host,
+                   rtcpCandidate.cand_addr.port,
+                   aStream->GetLevel()),
+      NS_DISPATCH_NORMAL);
+}
+
+// Forwards an end-of-candidates notification (with the default candidate
+// addresses for the m-line) to the main thread.
+void
+PeerConnectionMedia::EndOfLocalCandidates(const std::string& aDefaultAddr,
+                                          uint16_t aDefaultPort,
+                                          const std::string& aDefaultRtcpAddr,
+                                          uint16_t aDefaultRtcpPort,
+                                          uint16_t aMLine)
+{
+  GetMainThread()->Dispatch(
+      WrapRunnable(this,
+                   &PeerConnectionMedia::EndOfLocalCandidates_m,
+                   aDefaultAddr,
+                   aDefaultPort,
+                   aDefaultRtcpAddr,
+                   aDefaultRtcpPort,
+                   aMLine),
+      NS_DISPATCH_NORMAL);
+}
+
+// Fills aCandidate/aRtcpCandidate with the stream's default candidates for
+// components 1 (RTP) and 2 (RTCP). On failure the address is cleared and
+// the port zeroed; the RTCP component is optional under rtcp-mux.
+void
+PeerConnectionMedia::GetDefaultCandidates(const NrIceMediaStream& aStream,
+                                          NrIceCandidate* aCandidate,
+                                          NrIceCandidate* aRtcpCandidate)
+{
+  nsresult res = aStream.GetDefaultCandidate(1, aCandidate);
+  // Optional; component won't exist if doing rtcp-mux
+  if (NS_FAILED(aStream.GetDefaultCandidate(2, aRtcpCandidate))) {
+    aRtcpCandidate->cand_addr.host.clear();
+    aRtcpCandidate->cand_addr.port = 0;
+  }
+  if (NS_FAILED(res)) {
+    aCandidate->cand_addr.host.clear();
+    aCandidate->cand_addr.port = 0;
+    CSFLogError(logTag, "%s: GetDefaultCandidates failed for level %u, "
+                        "res=%u",
+                        __FUNCTION__,
+                        static_cast<unsigned>(aStream.GetLevel()),
+                        static_cast<unsigned>(res));
+  }
+}
+
+// Main-thread relay: re-emits the gathering state change as a signal.
+void
+PeerConnectionMedia::IceGatheringStateChange_m(NrIceCtx* ctx,
+                                               NrIceCtx::GatheringState state)
+{
+  ASSERT_ON_THREAD(mMainThread);
+  SignalIceGatheringStateChange(ctx, state);
+}
+
+// Main-thread relay: re-emits the connection state change as a signal.
+void
+PeerConnectionMedia::IceConnectionStateChange_m(NrIceCtx* ctx,
+                                                NrIceCtx::ConnectionState state)
+{
+  ASSERT_ON_THREAD(mMainThread);
+  SignalIceConnectionStateChange(ctx, state);
+}
+
+// Stream-ready signal handler; currently just logs the stream name.
+void
+PeerConnectionMedia::IceStreamReady_s(NrIceMediaStream *aStream)
+{
+  MOZ_ASSERT(aStream);
+
+  CSFLogDebug(logTag, "%s: %s", __FUNCTION__, aStream->name().c_str());
+}
+
+// Main-thread relay for a new local candidate: updates the default
+// candidate (when an address is known) and emits the candidate signal.
+void
+PeerConnectionMedia::OnCandidateFound_m(const std::string& aCandidateLine,
+                                        const std::string& aDefaultAddr,
+                                        uint16_t aDefaultPort,
+                                        const std::string& aDefaultRtcpAddr,
+                                        uint16_t aDefaultRtcpPort,
+                                        uint16_t aMLine)
+{
+  ASSERT_ON_THREAD(mMainThread);
+  if (!aDefaultAddr.empty()) {
+    SignalUpdateDefaultCandidate(aDefaultAddr,
+                                 aDefaultPort,
+                                 aDefaultRtcpAddr,
+                                 aDefaultRtcpPort,
+                                 aMLine);
+  }
+  SignalCandidate(aCandidateLine, aMLine);
+}
+
+// Main-thread relay for end-of-candidates: updates the default candidate
+// (when an address is known) and emits the end-of-candidates signal.
+void
+PeerConnectionMedia::EndOfLocalCandidates_m(const std::string& aDefaultAddr,
+                                            uint16_t aDefaultPort,
+                                            const std::string& aDefaultRtcpAddr,
+                                            uint16_t aDefaultRtcpPort,
+                                            uint16_t aMLine) {
+  ASSERT_ON_THREAD(mMainThread);
+  if (!aDefaultAddr.empty()) {
+    SignalUpdateDefaultCandidate(aDefaultAddr,
+                                 aDefaultPort,
+                                 aDefaultRtcpAddr,
+                                 aDefaultRtcpPort,
+                                 aMLine);
+  }
+  SignalEndOfLocalCandidates(aMLine);
+}
+
+// DTLS layer state-change handler (one-shot: disconnects itself). Treats a
+// negotiated ALPN of "c-webrtc" as a request for privacy and notifies the
+// owning PeerConnectionImpl on the main thread.
+void
+PeerConnectionMedia::DtlsConnected_s(TransportLayer *layer,
+                                     TransportLayer::State state)
+{
+  MOZ_ASSERT(layer->id() == "dtls");
+  TransportLayerDtls* dtlsLayer = static_cast<TransportLayerDtls*>(layer);
+  dtlsLayer->SignalStateChange.disconnect(this);
+
+  bool privacyRequested = (dtlsLayer->GetNegotiatedAlpn() == "c-webrtc");
+  GetMainThread()->Dispatch(
+    WrapRunnableNM(&PeerConnectionMedia::DtlsConnected_m,
+                   mParentHandle, privacyRequested),
+    NS_DISPATCH_NORMAL);
+}
+
+// Main-thread half of DtlsConnected: looks the parent up by handle (it may
+// already be gone) and records the DTLS-connected/privacy state.
+void
+PeerConnectionMedia::DtlsConnected_m(const std::string& aParentHandle,
+                                     bool aPrivacyRequested)
+{
+  PeerConnectionWrapper pcWrapper(aParentHandle);
+  PeerConnectionImpl* pc = pcWrapper.impl();
+  if (pc) {
+    pc->SetDtlsConnected(aPrivacyRequested);
+  }
+}
+
+// Stores a transport flow under its (level, rtp/rtcp) slot — asserting the
+// slot was empty — and hooks up the DTLS state listener on STS.
+void
+PeerConnectionMedia::AddTransportFlow(int aIndex, bool aRtcp,
+                                      const RefPtr<TransportFlow> &aFlow)
+{
+  int index_inner = GetTransportFlowIndex(aIndex, aRtcp);
+
+  MOZ_ASSERT(!mTransportFlows[index_inner]);
+  mTransportFlows[index_inner] = aFlow;
+
+  GetSTSThread()->Dispatch(
+    WrapRunnable(this, &PeerConnectionMedia::ConnectDtlsListener_s, aFlow),
+    NS_DISPATCH_NORMAL);
+}
+
+// Forgets a transport flow, releasing the reference on the STS thread.
+void
+PeerConnectionMedia::RemoveTransportFlow(int aIndex, bool aRtcp)
+{
+  int index_inner = GetTransportFlowIndex(aIndex, aRtcp);
+  NS_ProxyRelease(GetSTSThread(), mTransportFlows[index_inner].forget());
+}
+
+// Connects DtlsConnected_s to the flow's DTLS layer state signal, if the
+// flow has a DTLS layer.
+void
+PeerConnectionMedia::ConnectDtlsListener_s(const RefPtr<TransportFlow>& aFlow)
+{
+  TransportLayer* dtls = aFlow->GetLayer(TransportLayerDtls::ID());
+  if (dtls) {
+    dtls->SignalStateChange.connect(this, &PeerConnectionMedia::DtlsConnected_s);
+  }
+}
+
+// replaceTrack support: takes the pipeline registered under oldTrackId in
+// `info`, points it at aNewTrack, and re-registers it here under newTrackId.
+// Succeeds as a no-op when no pipeline exists yet (replace before
+// negotiation finished); fails if newTrackId already has a pipeline.
+nsresult
+LocalSourceStreamInfo::TakePipelineFrom(RefPtr<LocalSourceStreamInfo>& info,
+                                        const std::string& oldTrackId,
+                                        MediaStreamTrack& aNewTrack,
+                                        const std::string& newTrackId)
+{
+  if (mPipelines.count(newTrackId)) {
+    CSFLogError(logTag, "%s: Pipeline already exists for %s/%s",
+                __FUNCTION__, mId.c_str(), newTrackId.c_str());
+    return NS_ERROR_INVALID_ARG;
+  }
+
+  RefPtr<MediaPipeline> pipeline(info->ForgetPipelineByTrackId_m(oldTrackId));
+
+  if (!pipeline) {
+    // Replacetrack can potentially happen in the middle of offer/answer, before
+    // the pipeline has been created.
+    CSFLogInfo(logTag, "%s: Replacing track before the pipeline has been "
+                       "created, nothing to do.", __FUNCTION__);
+    return NS_OK;
+  }
+
+  nsresult rv =
+    static_cast<MediaPipelineTransmit*>(pipeline.get())->ReplaceTrack(aNewTrack);
+  NS_ENSURE_SUCCESS(rv, rv);
+
+  mPipelines[newTrackId] = pipeline;
+
+  return NS_OK;
+}
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+/**
+ * Tells you if any local track is isolated to a specific peer identity.
+ * Obviously, we want all the tracks to be isolated equally so that they can
+ * all be sent or not. We check once when we are setting a local description
+ * and that determines if we flip the "privacy requested" bit on. Once the bit
+ * is on, all media originating from this peer connection is isolated.
+ *
+ * @returns true if any track has a peerIdentity set on it
+ */
+// Scans every track of every local stream for a peerIdentity. Main thread
+// only.
+bool
+PeerConnectionMedia::AnyLocalTrackHasPeerIdentity() const
+{
+  ASSERT_ON_THREAD(mMainThread);
+
+  for (uint32_t u = 0; u < mLocalSourceStreams.Length(); u++) {
+    for (auto pair : mLocalSourceStreams[u]->GetMediaStreamTracks()) {
+      if (pair.second->GetPeerIdentity() != nullptr) {
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
+// Applies a new principal to every remote stream. Main thread only.
+void
+PeerConnectionMedia::UpdateRemoteStreamPrincipals_m(nsIPrincipal* aPrincipal)
+{
+  ASSERT_ON_THREAD(mMainThread);
+
+  for (uint32_t u = 0; u < mRemoteSourceStreams.Length(); u++) {
+    mRemoteSourceStreams[u]->UpdatePrincipal_m(aPrincipal);
+  }
+}
+
+// Propagates sink identity information to every local stream. Main thread
+// only.
+void
+PeerConnectionMedia::UpdateSinkIdentity_m(MediaStreamTrack* aTrack,
+                                          nsIPrincipal* aPrincipal,
+                                          const PeerIdentity* aSinkIdentity)
+{
+  ASSERT_ON_THREAD(mMainThread);
+
+  for (uint32_t u = 0; u < mLocalSourceStreams.Length(); u++) {
+    mLocalSourceStreams[u]->UpdateSinkIdentity_m(aTrack, aPrincipal,
+                                                 aSinkIdentity);
+  }
+}
+
+// Forwards the sink identity update to each transmit pipeline of this
+// stream.
+void
+LocalSourceStreamInfo::UpdateSinkIdentity_m(MediaStreamTrack* aTrack,
+                                            nsIPrincipal* aPrincipal,
+                                            const PeerIdentity* aSinkIdentity)
+{
+  for (auto it = mPipelines.begin(); it != mPipelines.end(); ++it) {
+    MediaPipelineTransmit* pipeline =
+      static_cast<MediaPipelineTransmit*>((*it).second.get());
+    pipeline->UpdateSinkIdentity_m(aTrack, aPrincipal, aSinkIdentity);
+  }
+}
+
+// Replaces the principal on every remote track's source and its receive
+// pipeline (if one exists yet).
+void RemoteSourceStreamInfo::UpdatePrincipal_m(nsIPrincipal* aPrincipal)
+{
+  // This blasts away the existing principal.
+  // We only do this when we become certain that the all tracks are safe to make
+  // accessible to the script principal.
+  for (auto& trackPair : mTracks) {
+    MOZ_RELEASE_ASSERT(trackPair.second);
+    RemoteTrackSource& source =
+      static_cast<RemoteTrackSource&>(trackPair.second->GetSource());
+    source.SetPrincipal(aPrincipal);
+
+    RefPtr<MediaPipeline> pipeline = GetPipelineByTrackId_m(trackPair.first);
+    if (pipeline) {
+      MOZ_ASSERT(pipeline->direction() == MediaPipeline::RECEIVE);
+      static_cast<MediaPipelineReceive*>(pipeline.get())
+        ->SetPrincipalHandle_m(MakePrincipalHandle(aPrincipal));
+    }
+  }
+}
+#endif // !defined(MOZILLA_EXTERNAL_LINKAGE)
+
+// True if any local or remote stream uses a codec with the given plugin id.
+bool
+PeerConnectionMedia::AnyCodecHasPluginID(uint64_t aPluginID)
+{
+  for (uint32_t i=0; i < mLocalSourceStreams.Length(); ++i) {
+    if (mLocalSourceStreams[i]->AnyCodecHasPluginID(aPluginID)) {
+      return true;
+    }
+  }
+  for (uint32_t i=0; i < mRemoteSourceStreams.Length(); ++i) {
+    if (mRemoteSourceStreams[i]->AnyCodecHasPluginID(aPluginID)) {
+      return true;
+    }
+  }
+  return false;
+}
+
+// True if any of this stream's pipeline conduits reports the plugin id.
+bool
+SourceStreamInfo::AnyCodecHasPluginID(uint64_t aPluginID)
+{
+  // Scan the videoConduits for this plugin ID
+  for (auto it = mPipelines.begin(); it != mPipelines.end(); ++it) {
+    if (it->second->Conduit()->CodecPluginID() == aPluginID) {
+      return true;
+    }
+  }
+  return false;
+}
+
+// Registers a pipeline under a track id; storing a duplicate is an error.
+nsresult
+SourceStreamInfo::StorePipeline(
+    const std::string& trackId,
+    const RefPtr<mozilla::MediaPipeline>& aPipeline)
+{
+  MOZ_ASSERT(mPipelines.find(trackId) == mPipelines.end());
+  if (mPipelines.find(trackId) != mPipelines.end()) {
+    CSFLogError(logTag, "%s: Storing duplicate track", __FUNCTION__);
+    return NS_ERROR_FAILURE;
+  }
+
+  mPipelines[trackId] = aPipeline;
+  return NS_OK;
+}
+
+// Ends every remote track on the underlying input stream before the base
+// class detaches the media.
+void
+RemoteSourceStreamInfo::DetachMedia_m()
+{
+  for (auto& webrtcIdAndTrack : mTracks) {
+    EndTrack(mMediaStream->GetInputStream(), webrtcIdAndTrack.second);
+  }
+  SourceStreamInfo::DetachMedia_m();
+}
+
+// Ends the track on the input stream (if we know it) and then removes it
+// from the base class bookkeeping.
+void
+RemoteSourceStreamInfo::RemoveTrack(const std::string& trackId)
+{
+  auto it = mTracks.find(trackId);
+  if (it != mTracks.end()) {
+    EndTrack(mMediaStream->GetInputStream(), it->second);
+  }
+
+  SourceStreamInfo::RemoveTrack(trackId);
+}
+
+// Pairs the new pipeline with any existing pipeline of the opposite kind
+// (audio vs video) and makes the video conduit sync to the audio conduit.
+void
+RemoteSourceStreamInfo::SyncPipeline(
+  RefPtr<MediaPipelineReceive> aPipeline)
+{
+  // See if we have both audio and video here, and if so cross the streams and
+  // sync them
+  // TODO: Do we need to prevent multiple syncs if there is more than one audio
+  // or video track in a single media stream? What are we supposed to do in this
+  // case?
+  for (auto i = mPipelines.begin(); i != mPipelines.end(); ++i) {
+    if (i->second->IsVideo() != aPipeline->IsVideo()) {
+      // Ok, we have one video, one non-video - cross the streams!
+      WebrtcAudioConduit *audio_conduit =
+        static_cast<WebrtcAudioConduit*>(aPipeline->IsVideo() ?
+                                         i->second->Conduit() :
+                                         aPipeline->Conduit());
+      WebrtcVideoConduit *video_conduit =
+        static_cast<WebrtcVideoConduit*>(aPipeline->IsVideo() ?
+                                         aPipeline->Conduit() :
+                                         i->second->Conduit());
+      video_conduit->SyncTo(audio_conduit);
+      CSFLogDebug(logTag, "Syncing %p to %p, %s to %s",
+                          video_conduit, audio_conduit,
+                          i->first.c_str(), aPipeline->trackid().c_str());
+    }
+  }
+}
+
+// Enables pulling on the source stream once at least one pipeline exists;
+// idempotent via the mReceiving flag.
+void
+RemoteSourceStreamInfo::StartReceiving()
+{
+  if (mReceiving || mPipelines.empty()) {
+    return;
+  }
+
+  mReceiving = true;
+
+  SourceMediaStream* source = GetMediaStream()->GetInputStream()->AsSourceStream();
+  source->SetPullEnabled(true);
+  // AdvanceKnownTracksTicksTime(HEAT_DEATH_OF_UNIVERSE) means that in
+  // theory per the API, we can't add more tracks before that
+  // time. However, the impl actually allows it, and it avoids a whole
+  // bunch of locking that would be required (and potential blocking)
+  // if we used smaller values and updated them on each NotifyPull.
+  source->AdvanceKnownTracksTime(STREAM_TIME_MAX);
+  CSFLogDebug(logTag, "Finished adding tracks to MediaStream %p", source);
+}
+
+// Looks up a pipeline by track id; returns nullptr during teardown (when
+// mMediaStream has been dropped) or if the track id is unknown. Main
+// thread only.
+RefPtr<MediaPipeline> SourceStreamInfo::GetPipelineByTrackId_m(
+    const std::string& trackId) {
+  ASSERT_ON_THREAD(mParent->GetMainThread());
+
+  // Refuse to hand out references if we're tearing down.
+  // (Since teardown involves a dispatch to and from STS before MediaPipelines
+  // are released, it is safe to start other dispatches to and from STS with a
+  // RefPtr<MediaPipeline>, since that reference won't be the last one
+  // standing)
+  if (mMediaStream) {
+    if (mPipelines.count(trackId)) {
+      return mPipelines[trackId];
+    }
+  }
+
+  return nullptr;
+}
+
+// Like GetPipelineByTrackId_m, but also removes the pipeline from the map,
+// transferring ownership to the caller. Main thread only.
+already_AddRefed<MediaPipeline>
+LocalSourceStreamInfo::ForgetPipelineByTrackId_m(const std::string& trackId)
+{
+  ASSERT_ON_THREAD(mParent->GetMainThread());
+
+  // Refuse to hand out references if we're tearing down.
+  // (Since teardown involves a dispatch to and from STS before MediaPipelines
+  // are released, it is safe to start other dispatches to and from STS with a
+  // RefPtr<MediaPipeline>, since that reference won't be the last one
+  // standing)
+  if (mMediaStream) {
+    if (mPipelines.count(trackId)) {
+      RefPtr<MediaPipeline> pipeline(mPipelines[trackId]);
+      mPipelines.erase(trackId);
+      return pipeline.forget();
+    }
+  }
+
+  return nullptr;
+}
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+// Remote tracks cannot be constrained: unconditionally rejects with
+// OverconstrainedError.
+auto
+RemoteTrackSource::ApplyConstraints(
+    nsPIDOMWindowInner* aWindow,
+    const dom::MediaTrackConstraints& aConstraints) -> already_AddRefed<PledgeVoid>
+{
+  RefPtr<PledgeVoid> p = new PledgeVoid();
+  p->Reject(new dom::MediaStreamError(aWindow,
+                                      NS_LITERAL_STRING("OverconstrainedError"),
+                                      NS_LITERAL_STRING("")));
+  return p.forget();
+}
+#endif
+
+} // namespace mozilla
diff --git a/media/webrtc/signaling/src/peerconnection/PeerConnectionMedia.h b/media/webrtc/signaling/src/peerconnection/PeerConnectionMedia.h
new file mode 100644
index 000000000..c0001a5e5
--- /dev/null
+++ b/media/webrtc/signaling/src/peerconnection/PeerConnectionMedia.h
@@ -0,0 +1,586 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _PEER_CONNECTION_MEDIA_H_
+#define _PEER_CONNECTION_MEDIA_H_
+
+#include <string>
+#include <vector>
+#include <map>
+
+#include "nspr.h"
+#include "prlock.h"
+
+#include "mozilla/RefPtr.h"
+#include "mozilla/UniquePtr.h"
+#include "nsComponentManagerUtils.h"
+#include "nsIProtocolProxyCallback.h"
+
+#include "signaling/src/jsep/JsepSession.h"
+#include "AudioSegment.h"
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+#include "Layers.h"
+#include "VideoUtils.h"
+#include "ImageLayers.h"
+#include "VideoSegment.h"
+#include "MediaStreamTrack.h"
+#endif
+
+class nsIPrincipal;
+
+namespace mozilla {
+class DataChannel;
+class PeerIdentity;
+class MediaPipelineFactory;
+namespace dom {
+struct RTCInboundRTPStreamStats;
+struct RTCOutboundRTPStreamStats;
+}
+}
+
+#include "nricectxhandler.h"
+#include "nriceresolver.h"
+#include "nricemediastream.h"
+#include "MediaPipeline.h"
+
+namespace mozilla {
+
+class PeerConnectionImpl;
+class PeerConnectionMedia;
+class PCUuidGenerator;
+
+class SourceStreamInfo {
+public:
+ SourceStreamInfo(DOMMediaStream* aMediaStream,
+ PeerConnectionMedia *aParent,
+ const std::string& aId)
+ : mMediaStream(aMediaStream),
+ mParent(aParent),
+ mId(aId) {
+ MOZ_ASSERT(mMediaStream);
+ }
+
+ SourceStreamInfo(already_AddRefed<DOMMediaStream>& aMediaStream,
+ PeerConnectionMedia *aParent,
+ const std::string& aId)
+ : mMediaStream(aMediaStream),
+ mParent(aParent),
+ mId(aId) {
+ MOZ_ASSERT(mMediaStream);
+ }
+
+ virtual ~SourceStreamInfo() {}
+
+ DOMMediaStream* GetMediaStream() const {
+ return mMediaStream;
+ }
+
+ nsresult StorePipeline(const std::string& trackId,
+ const RefPtr<MediaPipeline>& aPipeline);
+
+ virtual void AddTrack(const std::string& trackId,
+ const RefPtr<dom::MediaStreamTrack>& aTrack)
+ {
+ mTracks.insert(std::make_pair(trackId, aTrack));
+ }
+ virtual void RemoveTrack(const std::string& trackId);
+ bool HasTrack(const std::string& trackId) const
+ {
+ return !!mTracks.count(trackId);
+ }
+ size_t GetTrackCount() const { return mTracks.size(); }
+
+ // This method exists for stats and the unittests.
+ // It allows visibility into the pipelines and flows.
+ const std::map<std::string, RefPtr<MediaPipeline>>&
+ GetPipelines() const { return mPipelines; }
+ RefPtr<MediaPipeline> GetPipelineByTrackId_m(const std::string& trackId);
+ // This is needed so PeerConnectionImpl can unregister itself as
+ // PrincipalChangeObserver from each track.
+ const std::map<std::string, RefPtr<dom::MediaStreamTrack>>&
+ GetMediaStreamTracks() const { return mTracks; }
+ dom::MediaStreamTrack* GetTrackById(const std::string& trackId) const
+ {
+ auto it = mTracks.find(trackId);
+ if (it == mTracks.end()) {
+ return nullptr;
+ }
+
+ return it->second;
+ }
+ const std::string& GetId() const { return mId; }
+
+ void DetachTransport_s();
+ virtual void DetachMedia_m();
+ bool AnyCodecHasPluginID(uint64_t aPluginID);
+protected:
+ void EndTrack(MediaStream* stream, dom::MediaStreamTrack* track);
+ RefPtr<DOMMediaStream> mMediaStream;
+ PeerConnectionMedia *mParent;
+ const std::string mId;
+ // These get set up before we generate our local description, the pipelines
+ // and conduits are set up once offer/answer completes.
+ std::map<std::string, RefPtr<dom::MediaStreamTrack>> mTracks;
+ std::map<std::string, RefPtr<MediaPipeline>> mPipelines;
+};
+
+// TODO(ekr@rtfm.com): Refactor {Local,Remote}SourceStreamInfo
+// bug 837539.
+class LocalSourceStreamInfo : public SourceStreamInfo {
+ ~LocalSourceStreamInfo() {
+ mMediaStream = nullptr;
+ }
+public:
+ LocalSourceStreamInfo(DOMMediaStream *aMediaStream,
+ PeerConnectionMedia *aParent,
+ const std::string& aId)
+ : SourceStreamInfo(aMediaStream, aParent, aId) {}
+
+ nsresult TakePipelineFrom(RefPtr<LocalSourceStreamInfo>& info,
+ const std::string& oldTrackId,
+ dom::MediaStreamTrack& aNewTrack,
+ const std::string& newTrackId);
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+ void UpdateSinkIdentity_m(dom::MediaStreamTrack* aTrack,
+ nsIPrincipal* aPrincipal,
+ const PeerIdentity* aSinkIdentity);
+#endif
+
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(LocalSourceStreamInfo)
+
+private:
+ already_AddRefed<MediaPipeline> ForgetPipelineByTrackId_m(
+ const std::string& trackId);
+};
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+class RemoteTrackSource : public dom::MediaStreamTrackSource
+{
+public:
+ explicit RemoteTrackSource(nsIPrincipal* aPrincipal, const nsString& aLabel)
+ : dom::MediaStreamTrackSource(aPrincipal, aLabel) {}
+
+ dom::MediaSourceEnum GetMediaSource() const override
+ {
+ return dom::MediaSourceEnum::Other;
+ }
+
+ already_AddRefed<PledgeVoid>
+ ApplyConstraints(nsPIDOMWindowInner* aWindow,
+ const dom::MediaTrackConstraints& aConstraints) override;
+
+ void Stop() override
+ {
+ // XXX (Bug 1314270): Implement rejection logic if necessary when we have
+ // clarity in the spec.
+ }
+
+ void SetPrincipal(nsIPrincipal* aPrincipal)
+ {
+ mPrincipal = aPrincipal;
+ PrincipalChanged();
+ }
+
+protected:
+ virtual ~RemoteTrackSource() {}
+};
+#endif
+
+class RemoteSourceStreamInfo : public SourceStreamInfo {
+ ~RemoteSourceStreamInfo() {}
+ public:
+ RemoteSourceStreamInfo(already_AddRefed<DOMMediaStream> aMediaStream,
+ PeerConnectionMedia *aParent,
+ const std::string& aId)
+ : SourceStreamInfo(aMediaStream, aParent, aId),
+ mReceiving(false)
+ {
+ }
+
+ void DetachMedia_m() override;
+ void RemoveTrack(const std::string& trackId) override;
+ void SyncPipeline(RefPtr<MediaPipelineReceive> aPipeline);
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+ void UpdatePrincipal_m(nsIPrincipal* aPrincipal);
+#endif
+
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(RemoteSourceStreamInfo)
+
+ void AddTrack(const std::string& trackId,
+ const RefPtr<dom::MediaStreamTrack>& aTrack) override
+ {
+ SourceStreamInfo::AddTrack(trackId, aTrack);
+ }
+
+ TrackID GetNumericTrackId(const std::string& trackId) const
+ {
+ dom::MediaStreamTrack* track = GetTrackById(trackId);
+ if (!track) {
+ return TRACK_INVALID;
+ }
+ return track->mTrackID;
+ }
+
+ void StartReceiving();
+
+ private:
+ // True iff SetPullEnabled(true) has been called on the DOMMediaStream. This
+ // happens when offer/answer concludes.
+ bool mReceiving;
+};
+
+class PeerConnectionMedia : public sigslot::has_slots<> {
+ ~PeerConnectionMedia()
+ {
+ MOZ_RELEASE_ASSERT(!mMainThread);
+ }
+
+ public:
+ explicit PeerConnectionMedia(PeerConnectionImpl *parent);
+
+ enum IceRestartState { ICE_RESTART_NONE,
+ ICE_RESTART_PROVISIONAL,
+ ICE_RESTART_COMMITTED
+ };
+
+ PeerConnectionImpl* GetPC() { return mParent; }
+ nsresult Init(const std::vector<NrIceStunServer>& stun_servers,
+ const std::vector<NrIceTurnServer>& turn_servers,
+ NrIceCtx::Policy policy);
+ // WARNING: This destroys the object!
+ void SelfDestruct();
+
+ RefPtr<NrIceCtxHandler> ice_ctx_hdlr() const { return mIceCtxHdlr; }
+ RefPtr<NrIceCtx> ice_ctx() const { return mIceCtxHdlr->ctx(); }
+
+ RefPtr<NrIceMediaStream> ice_media_stream(size_t i) const {
+ return mIceCtxHdlr->ctx()->GetStream(i);
+ }
+
+ size_t num_ice_media_streams() const {
+ return mIceCtxHdlr->ctx()->GetStreamCount();
+ }
+
+ // Ensure ICE transports exist that we might need when offer/answer concludes
+ void EnsureTransports(const JsepSession& aSession);
+
+ // Activate or remove ICE transports at the conclusion of offer/answer,
+ // or when rollback occurs.
+ void ActivateOrRemoveTransports(const JsepSession& aSession);
+
+ // Start ICE checks.
+ void StartIceChecks(const JsepSession& session);
+
+ bool IsIceRestarting() const;
+ IceRestartState GetIceRestartState() const;
+
+ // Begin ICE restart
+ void BeginIceRestart(const std::string& ufrag,
+ const std::string& pwd);
+ // Commit ICE Restart - offer/answer complete, no rollback possible
+ void CommitIceRestart();
+ // Finalize ICE restart
+ void FinalizeIceRestart();
+ // Abort ICE restart
+ void RollbackIceRestart();
+
+ // Process a trickle ICE candidate.
+ void AddIceCandidate(const std::string& candidate, const std::string& mid,
+ uint32_t aMLine);
+
+ // Handle complete media pipelines.
+ nsresult UpdateMediaPipelines(const JsepSession& session);
+
+ // Add a track (main thread only)
+ nsresult AddTrack(DOMMediaStream& aMediaStream,
+ const std::string& streamId,
+ dom::MediaStreamTrack& aTrack,
+ const std::string& trackId);
+
+ nsresult RemoveLocalTrack(const std::string& streamId,
+ const std::string& trackId);
+ nsresult RemoveRemoteTrack(const std::string& streamId,
+ const std::string& trackId);
+
+ // Get a specific local stream
+ uint32_t LocalStreamsLength()
+ {
+ return mLocalSourceStreams.Length();
+ }
+ LocalSourceStreamInfo* GetLocalStreamByIndex(int index);
+ LocalSourceStreamInfo* GetLocalStreamById(const std::string& id);
+ LocalSourceStreamInfo* GetLocalStreamByTrackId(const std::string& id);
+
+ // Get a specific remote stream
+ uint32_t RemoteStreamsLength()
+ {
+ return mRemoteSourceStreams.Length();
+ }
+
+ RemoteSourceStreamInfo* GetRemoteStreamByIndex(size_t index);
+ RemoteSourceStreamInfo* GetRemoteStreamById(const std::string& id);
+ RemoteSourceStreamInfo* GetRemoteStreamByTrackId(const std::string& id);
+
+ // Add a remote stream.
+ nsresult AddRemoteStream(RefPtr<RemoteSourceStreamInfo> aInfo);
+
+ nsresult ReplaceTrack(const std::string& aOldStreamId,
+ const std::string& aOldTrackId,
+ dom::MediaStreamTrack& aNewTrack,
+ const std::string& aNewStreamId,
+ const std::string& aNewTrackId);
+
+#if !defined(MOZILLA_EXTERNAL_LINKAGE)
+ // In cases where the peer isn't yet identified, we disable the pipeline (not
+ // the stream, that would potentially affect others), so that it sends
+ // black/silence. Once the peer is identified, re-enable those streams.
+ // aTrack will be set if this update came from a principal change on aTrack.
+ void UpdateSinkIdentity_m(dom::MediaStreamTrack* aTrack,
+ nsIPrincipal* aPrincipal,
+ const PeerIdentity* aSinkIdentity);
+ // this determines if any track is peerIdentity constrained
+ bool AnyLocalTrackHasPeerIdentity() const;
+ // When we finally learn who is on the other end, we need to change the ownership
+ // on streams
+ void UpdateRemoteStreamPrincipals_m(nsIPrincipal* aPrincipal);
+#endif
+
+ bool AnyCodecHasPluginID(uint64_t aPluginID);
+
+ const nsCOMPtr<nsIThread>& GetMainThread() const { return mMainThread; }
+ const nsCOMPtr<nsIEventTarget>& GetSTSThread() const { return mSTSThread; }
+
+ static size_t GetTransportFlowIndex(int aStreamIndex, bool aRtcp)
+ {
+ return aStreamIndex * 2 + (aRtcp ? 1 : 0);
+ }
+
+ // Get a transport flow either RTP/RTCP for a particular stream
+ // A stream can be of audio/video/datachannel/budled(?) types
+ RefPtr<TransportFlow> GetTransportFlow(int aStreamIndex, bool aIsRtcp) {
+ int index_inner = GetTransportFlowIndex(aStreamIndex, aIsRtcp);
+
+ if (mTransportFlows.find(index_inner) == mTransportFlows.end())
+ return nullptr;
+
+ return mTransportFlows[index_inner];
+ }
+
+ // Add a transport flow
+ void AddTransportFlow(int aIndex, bool aRtcp,
+ const RefPtr<TransportFlow> &aFlow);
+ void RemoveTransportFlow(int aIndex, bool aRtcp);
+ void ConnectDtlsListener_s(const RefPtr<TransportFlow>& aFlow);
+ void DtlsConnected_s(TransportLayer* aFlow,
+ TransportLayer::State state);
+ static void DtlsConnected_m(const std::string& aParentHandle,
+ bool aPrivacyRequested);
+
+ RefPtr<AudioSessionConduit> GetAudioConduit(size_t level) {
+ auto it = mConduits.find(level);
+ if (it == mConduits.end()) {
+ return nullptr;
+ }
+
+ if (it->second.first) {
+ MOZ_ASSERT(false, "In GetAudioConduit, we found a video conduit!");
+ return nullptr;
+ }
+
+ return RefPtr<AudioSessionConduit>(
+ static_cast<AudioSessionConduit*>(it->second.second.get()));
+ }
+
+ RefPtr<VideoSessionConduit> GetVideoConduit(size_t level) {
+ auto it = mConduits.find(level);
+ if (it == mConduits.end()) {
+ return nullptr;
+ }
+
+ if (!it->second.first) {
+ MOZ_ASSERT(false, "In GetVideoConduit, we found an audio conduit!");
+ return nullptr;
+ }
+
+ return RefPtr<VideoSessionConduit>(
+ static_cast<VideoSessionConduit*>(it->second.second.get()));
+ }
+
+ // Add a conduit
+ void AddAudioConduit(size_t level, const RefPtr<AudioSessionConduit> &aConduit) {
+ mConduits[level] = std::make_pair(false, aConduit);
+ }
+
+ void AddVideoConduit(size_t level, const RefPtr<VideoSessionConduit> &aConduit) {
+ mConduits[level] = std::make_pair(true, aConduit);
+ }
+
+ // ICE state signals
+ sigslot::signal2<NrIceCtx*, NrIceCtx::GatheringState>
+ SignalIceGatheringStateChange;
+ sigslot::signal2<NrIceCtx*, NrIceCtx::ConnectionState>
+ SignalIceConnectionStateChange;
+ // This passes a candidate:... attribute and level
+ sigslot::signal2<const std::string&, uint16_t> SignalCandidate;
+ // This passes address, port, level of the default candidate.
+ sigslot::signal5<const std::string&, uint16_t,
+ const std::string&, uint16_t, uint16_t>
+ SignalUpdateDefaultCandidate;
+ sigslot::signal1<uint16_t>
+ SignalEndOfLocalCandidates;
+
+ private:
+ nsresult InitProxy();
+ class ProtocolProxyQueryHandler : public nsIProtocolProxyCallback {
+ public:
+ explicit ProtocolProxyQueryHandler(PeerConnectionMedia *pcm) :
+ pcm_(pcm) {}
+
+ NS_IMETHOD OnProxyAvailable(nsICancelable *request,
+ nsIChannel *aChannel,
+ nsIProxyInfo *proxyinfo,
+ nsresult result) override;
+ NS_DECL_ISUPPORTS
+
+ private:
+ void SetProxyOnPcm(nsIProxyInfo& proxyinfo);
+ RefPtr<PeerConnectionMedia> pcm_;
+ virtual ~ProtocolProxyQueryHandler() {}
+ };
+
+ // Shutdown media transport. Must be called on STS thread.
+ void ShutdownMediaTransport_s();
+
+ // Final destruction of the media stream. Must be called on the main
+ // thread.
+ void SelfDestruct_m();
+
+ // Manage ICE transports.
+ void EnsureTransport_s(size_t aLevel, size_t aComponentCount);
+ void ActivateOrRemoveTransport_s(
+ size_t aMLine,
+ size_t aComponentCount,
+ const std::string& aUfrag,
+ const std::string& aPassword,
+ const std::vector<std::string>& aCandidateList);
+ void RemoveTransportsAtOrAfter_s(size_t aMLine);
+
+ void GatherIfReady();
+ void FlushIceCtxOperationQueueIfReady();
+ void PerformOrEnqueueIceCtxOperation(nsIRunnable* runnable);
+ void EnsureIceGathering_s(bool aDefaultRouteOnly, bool aProxyOnly);
+ void StartIceChecks_s(bool aIsControlling,
+ bool aIsIceLite,
+ const std::vector<std::string>& aIceOptionsList);
+
+ void BeginIceRestart_s(RefPtr<NrIceCtx> new_ctx);
+ void FinalizeIceRestart_s();
+ void RollbackIceRestart_s();
+ bool GetPrefDefaultAddressOnly() const;
+ bool GetPrefProxyOnly() const;
+
+ void ConnectSignals(NrIceCtx *aCtx, NrIceCtx *aOldCtx=nullptr);
+
+ // Process a trickle ICE candidate.
+ void AddIceCandidate_s(const std::string& aCandidate, const std::string& aMid,
+ uint32_t aMLine);
+
+
+ // ICE events
+ void IceGatheringStateChange_s(NrIceCtx* ctx,
+ NrIceCtx::GatheringState state);
+ void IceConnectionStateChange_s(NrIceCtx* ctx,
+ NrIceCtx::ConnectionState state);
+ void IceStreamReady_s(NrIceMediaStream *aStream);
+ void OnCandidateFound_s(NrIceMediaStream *aStream,
+ const std::string& aCandidate);
+ void EndOfLocalCandidates(const std::string& aDefaultAddr,
+ uint16_t aDefaultPort,
+ const std::string& aDefaultRtcpAddr,
+ uint16_t aDefaultRtcpPort,
+ uint16_t aMLine);
+ void GetDefaultCandidates(const NrIceMediaStream& aStream,
+ NrIceCandidate* aCandidate,
+ NrIceCandidate* aRtcpCandidate);
+
+ void IceGatheringStateChange_m(NrIceCtx* ctx,
+ NrIceCtx::GatheringState state);
+ void IceConnectionStateChange_m(NrIceCtx* ctx,
+ NrIceCtx::ConnectionState state);
+ void OnCandidateFound_m(const std::string& aCandidateLine,
+ const std::string& aDefaultAddr,
+ uint16_t aDefaultPort,
+ const std::string& aDefaultRtcpAddr,
+ uint16_t aDefaultRtcpPort,
+ uint16_t aMLine);
+ void EndOfLocalCandidates_m(const std::string& aDefaultAddr,
+ uint16_t aDefaultPort,
+ const std::string& aDefaultRtcpAddr,
+ uint16_t aDefaultRtcpPort,
+ uint16_t aMLine);
+ bool IsIceCtxReady() const {
+ return mProxyResolveCompleted;
+ }
+
+ // The parent PC
+ PeerConnectionImpl *mParent;
+ // and a loose handle on it for event driven stuff
+ std::string mParentHandle;
+ std::string mParentName;
+
+ // A list of streams returned from GetUserMedia
+ // This is only accessed on the main thread (with one special exception)
+ nsTArray<RefPtr<LocalSourceStreamInfo> > mLocalSourceStreams;
+
+ // A list of streams provided by the other side
+ // This is only accessed on the main thread (with one special exception)
+ nsTArray<RefPtr<RemoteSourceStreamInfo> > mRemoteSourceStreams;
+
+ std::map<size_t, std::pair<bool, RefPtr<MediaSessionConduit>>> mConduits;
+
+ // ICE objects
+ RefPtr<NrIceCtxHandler> mIceCtxHdlr;
+
+ // DNS
+ RefPtr<NrIceResolver> mDNSResolver;
+
+ // Transport flows: even is RTP, odd is RTCP
+ std::map<int, RefPtr<TransportFlow> > mTransportFlows;
+
+ // UUID Generator
+ UniquePtr<PCUuidGenerator> mUuidGen;
+
+ // The main thread.
+ nsCOMPtr<nsIThread> mMainThread;
+
+ // The STS thread.
+ nsCOMPtr<nsIEventTarget> mSTSThread;
+
+ // Used whenever we need to dispatch a runnable to STS to tweak something
+ // on our ICE ctx, but are not ready to do so at the moment (eg; we are
+ // waiting to get a callback with our http proxy config before we start
+ // gathering or start checking)
+ std::vector<nsCOMPtr<nsIRunnable>> mQueuedIceCtxOperations;
+
+ // Used to cancel any ongoing proxy request.
+ nsCOMPtr<nsICancelable> mProxyRequest;
+
+ // Used to track the state of the request.
+ bool mProxyResolveCompleted;
+
+ // Used to store the result of the request.
+ UniquePtr<NrIceProxyServer> mProxyServer;
+
+ // Used to track the state of ice restart
+ IceRestartState mIceRestartState;
+
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(PeerConnectionMedia)
+};
+
+} // namespace mozilla
+
+#endif
diff --git a/media/webrtc/signaling/src/peerconnection/WebrtcGlobalChild.h b/media/webrtc/signaling/src/peerconnection/WebrtcGlobalChild.h
new file mode 100644
index 000000000..544315a3e
--- /dev/null
+++ b/media/webrtc/signaling/src/peerconnection/WebrtcGlobalChild.h
@@ -0,0 +1,40 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _WEBRTC_GLOBAL_CHILD_H_
+#define _WEBRTC_GLOBAL_CHILD_H_
+
+#include "mozilla/dom/PWebrtcGlobalChild.h"
+
+namespace mozilla {
+namespace dom {
+
+class WebrtcGlobalChild :
+ public PWebrtcGlobalChild
+{
+ friend class ContentChild;
+
+ bool mShutdown;
+
+ MOZ_IMPLICIT WebrtcGlobalChild();
+ virtual void ActorDestroy(ActorDestroyReason aWhy) override;
+
+ virtual bool RecvGetStatsRequest(const int& aRequestId,
+ const nsString& aPcIdFilter) override;
+ virtual bool RecvClearStatsRequest() override;
+ virtual bool RecvGetLogRequest(const int& aReqestId,
+ const nsCString& aPattern) override;
+ virtual bool RecvClearLogRequest() override;
+ virtual bool RecvSetAecLogging(const bool& aEnable) override;
+ virtual bool RecvSetDebugMode(const int& aLevel) override;
+
+public:
+ virtual ~WebrtcGlobalChild();
+ static WebrtcGlobalChild* Create();
+};
+
+} // namespace dom
+} // namespace mozilla
+
+#endif // _WEBRTC_GLOBAL_CHILD_H_
diff --git a/media/webrtc/signaling/src/peerconnection/WebrtcGlobalInformation.cpp b/media/webrtc/signaling/src/peerconnection/WebrtcGlobalInformation.cpp
new file mode 100644
index 000000000..96bdd5b70
--- /dev/null
+++ b/media/webrtc/signaling/src/peerconnection/WebrtcGlobalInformation.cpp
@@ -0,0 +1,1241 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "WebrtcGlobalInformation.h"
+#include "mozilla/media/webrtc/WebrtcGlobal.h"
+#include "WebrtcGlobalChild.h"
+#include "WebrtcGlobalParent.h"
+
+#include <deque>
+#include <string>
+#include <algorithm>
+#include <vector>
+#include <map>
+#include <queue>
+
+#include "CSFLog.h"
+#include "WebRtcLog.h"
+#include "mozilla/dom/WebrtcGlobalInformationBinding.h"
+#include "mozilla/dom/ContentChild.h"
+
+#include "nsAutoPtr.h"
+#include "nsNetCID.h" // NS_SOCKETTRANSPORTSERVICE_CONTRACTID
+#include "nsServiceManagerUtils.h" // do_GetService
+#include "mozilla/ErrorResult.h"
+#include "mozilla/Vector.h"
+#include "nsProxyRelease.h"
+#include "mozilla/Telemetry.h"
+#include "mozilla/Unused.h"
+#include "mozilla/StaticMutex.h"
+#include "mozilla/RefPtr.h"
+
+#include "rlogconnector.h"
+#include "runnable_utils.h"
+#include "PeerConnectionCtx.h"
+#include "PeerConnectionImpl.h"
+#include "webrtc/system_wrappers/interface/trace.h"
+
+static const char* logTag = "WebrtcGlobalInformation";
+
+namespace mozilla {
+namespace dom {
+
+typedef Vector<nsAutoPtr<RTCStatsQuery>> RTCStatsQueries;
+typedef nsTArray<RTCStatsReportInternal> Stats;
+
+template<class Request, typename Callback,
+ typename Result, typename QueryParam>
+class RequestManager
+{
+public:
+
+ static Request* Create(Callback& aCallback, QueryParam& aParam)
+ {
+ mozilla::StaticMutexAutoLock lock(sMutex);
+
+ int id = ++sLastRequestId;
+ auto result = sRequests.insert(
+ std::make_pair(id, Request(id, aCallback, aParam)));
+
+ if (!result.second) {
+ return nullptr;
+ }
+
+ return &result.first->second;
+ }
+
+ static void Delete(int aId)
+ {
+ mozilla::StaticMutexAutoLock lock(sMutex);
+ sRequests.erase(aId);
+ }
+
+ static Request* Get(int aId)
+ {
+ mozilla::StaticMutexAutoLock lock(sMutex);
+ auto r = sRequests.find(aId);
+
+ if (r == sRequests.end()) {
+ return nullptr;
+ }
+
+ return &r->second;
+ }
+
+ Result mResult;
+ std::queue<RefPtr<WebrtcGlobalParent>> mContactList;
+ const int mRequestId;
+
+ RefPtr<WebrtcGlobalParent> GetNextParent()
+ {
+ while (!mContactList.empty()) {
+ RefPtr<WebrtcGlobalParent> next = mContactList.front();
+ mContactList.pop();
+ if (next->IsActive()) {
+ return next;
+ }
+ }
+
+ return nullptr;
+ }
+
+ void Complete()
+ {
+ ErrorResult rv;
+ mCallback.get()->Call(mResult, rv);
+
+ if (rv.Failed()) {
+ CSFLogError(logTag, "Error firing stats observer callback");
+ }
+ }
+
+protected:
+ // The mutex is used to protect two related operations involving the sRequest map
+ // and the sLastRequestId. For the map, it prevents more than one thread from
+ // adding or deleting map entries at the same time. For id generation,
+ // it creates an atomic allocation and increment.
+ static mozilla::StaticMutex sMutex;
+ static std::map<int, Request> sRequests;
+ static int sLastRequestId;
+
+ Callback mCallback;
+
+ explicit RequestManager(int aId, Callback& aCallback)
+ : mRequestId(aId)
+ , mCallback(aCallback)
+ {}
+ ~RequestManager() {}
+private:
+
+ RequestManager() = delete;
+ RequestManager& operator=(const RequestManager&) = delete;
+};
+
+template<class Request, typename Callback,
+ typename Result, typename QueryParam>
+mozilla::StaticMutex RequestManager<Request, Callback, Result, QueryParam>::sMutex;
+template<class Request, typename Callback,
+ typename Result, typename QueryParam>
+std::map<int, Request> RequestManager<Request, Callback, Result, QueryParam>::sRequests;
+template<class Request, typename Callback,
+ typename Result, typename QueryParam>
+int RequestManager<Request, Callback, Result, QueryParam>::sLastRequestId;
+
+typedef nsMainThreadPtrHandle<WebrtcGlobalStatisticsCallback> StatsRequestCallback;
+
+class StatsRequest
+ : public RequestManager<StatsRequest,
+ StatsRequestCallback,
+ WebrtcGlobalStatisticsReport,
+ nsAString>
+{
+public:
+ const nsString mPcIdFilter;
+ explicit StatsRequest(int aId, StatsRequestCallback& aCallback, nsAString& aFilter)
+ : RequestManager(aId, aCallback)
+ , mPcIdFilter(aFilter)
+ {
+ mResult.mReports.Construct();
+ }
+
+private:
+ StatsRequest() = delete;
+ StatsRequest& operator=(const StatsRequest&) = delete;
+};
+
+typedef nsMainThreadPtrHandle<WebrtcGlobalLoggingCallback> LogRequestCallback;
+
+class LogRequest
+ : public RequestManager<LogRequest,
+ LogRequestCallback,
+ Sequence<nsString>,
+ const nsACString>
+{
+public:
+ const nsCString mPattern;
+ explicit LogRequest(int aId, LogRequestCallback& aCallback, const nsACString& aPattern)
+ : RequestManager(aId, aCallback)
+ , mPattern(aPattern)
+ {}
+
+private:
+ LogRequest() = delete;
+ LogRequest& operator=(const LogRequest&) = delete;
+};
+
+class WebrtcContentParents
+{
+public:
+ static WebrtcGlobalParent* Alloc();
+ static void Dealloc(WebrtcGlobalParent* aParent);
+ static bool Empty()
+ {
+ return sContentParents.empty();
+ }
+ static const std::vector<RefPtr<WebrtcGlobalParent>>& GetAll()
+ {
+ return sContentParents;
+ }
+private:
+ static std::vector<RefPtr<WebrtcGlobalParent>> sContentParents;
+ WebrtcContentParents() = delete;
+ WebrtcContentParents(const WebrtcContentParents&) = delete;
+ WebrtcContentParents& operator=(const WebrtcContentParents&) = delete;
+};
+
+std::vector<RefPtr<WebrtcGlobalParent>> WebrtcContentParents::sContentParents;
+
+WebrtcGlobalParent* WebrtcContentParents::Alloc()
+{
+ RefPtr<WebrtcGlobalParent> cp = new WebrtcGlobalParent;
+ sContentParents.push_back(cp);
+ return cp.get();
+}
+
+void WebrtcContentParents::Dealloc(WebrtcGlobalParent* aParent)
+{
+ if (aParent) {
+ aParent->mShutdown = true;
+ auto cp = std::find(sContentParents.begin(), sContentParents.end(), aParent);
+ if (cp != sContentParents.end()) {
+ sContentParents.erase(cp);
+ }
+ }
+}
+
+static PeerConnectionCtx* GetPeerConnectionCtx()
+{
+ if(PeerConnectionCtx::isActive()) {
+ MOZ_ASSERT(PeerConnectionCtx::GetInstance());
+ return PeerConnectionCtx::GetInstance();
+ }
+ return nullptr;
+}
+
+static void
+OnStatsReport_m(WebrtcGlobalChild* aThisChild,
+ const int aRequestId,
+ nsAutoPtr<RTCStatsQueries> aQueryList)
+{
+ MOZ_ASSERT(NS_IsMainThread());
+ MOZ_ASSERT(aQueryList);
+
+ if (aThisChild) {
+ Stats stats;
+
+ // Copy stats generated for the currently active PeerConnections
+ for (auto&& query : *aQueryList) {
+ stats.AppendElement(*(query->report));
+ }
+ // Reports saved for closed/destroyed PeerConnections
+ auto ctx = PeerConnectionCtx::GetInstance();
+ if (ctx) {
+ for (auto&& pc : ctx->mStatsForClosedPeerConnections) {
+ stats.AppendElement(pc);
+ }
+ }
+
+ Unused << aThisChild->SendGetStatsResult(aRequestId, stats);
+ return;
+ }
+
+ // This is the last stats report to be collected. (Must be the gecko process).
+ MOZ_ASSERT(XRE_IsParentProcess());
+
+ StatsRequest* request = StatsRequest::Get(aRequestId);
+
+ if (!request) {
+ CSFLogError(logTag, "Bad RequestId");
+ return;
+ }
+
+ for (auto&& query : *aQueryList) {
+ request->mResult.mReports.Value().AppendElement(*(query->report), fallible);
+ }
+
+ // Reports saved for closed/destroyed PeerConnections
+ auto ctx = PeerConnectionCtx::GetInstance();
+ if (ctx) {
+ for (auto&& pc : ctx->mStatsForClosedPeerConnections) {
+ request->mResult.mReports.Value().AppendElement(pc, fallible);
+ }
+ }
+
+ request->Complete();
+ StatsRequest::Delete(aRequestId);
+}
+
+static void
+GetAllStats_s(WebrtcGlobalChild* aThisChild,
+ const int aRequestId,
+ nsAutoPtr<RTCStatsQueries> aQueryList)
+{
+ MOZ_ASSERT(aQueryList);
+ // The call to PeerConnetionImpl must happen from a runnable
+ // dispatched on the STS thread.
+
+ // Get stats from active connections.
+ for (auto&& query : *aQueryList) {
+ PeerConnectionImpl::ExecuteStatsQuery_s(query);
+ }
+
+ // After the RTCStatsQueries have been filled in, control must return
+ // to the main thread before their eventual destruction.
+ NS_DispatchToMainThread(WrapRunnableNM(&OnStatsReport_m,
+ aThisChild,
+ aRequestId,
+ aQueryList),
+ NS_DISPATCH_NORMAL);
+}
+
+static void OnGetLogging_m(WebrtcGlobalChild* aThisChild,
+ const int aRequestId,
+ nsAutoPtr<std::deque<std::string>> aLogList)
+{
+ MOZ_ASSERT(NS_IsMainThread());
+
+ if (aThisChild) {
+ // Add this log to the collection of logs and call into
+ // the next content process.
+ Sequence<nsString> nsLogs;
+
+ if (!aLogList->empty()) {
+ for (auto& line : *aLogList) {
+ nsLogs.AppendElement(NS_ConvertUTF8toUTF16(line.c_str()), fallible);
+ }
+ nsLogs.AppendElement(NS_LITERAL_STRING("+++++++ END ++++++++"), fallible);
+ }
+
+ Unused << aThisChild->SendGetLogResult(aRequestId, nsLogs);
+ return;
+ }
+
+ // This is the last log to be collected. (Must be the gecko process).
+ MOZ_ASSERT(XRE_IsParentProcess());
+
+ LogRequest* request = LogRequest::Get(aRequestId);
+
+ if (!request) {
+ CSFLogError(logTag, "Bad RequestId");
+ return;
+ }
+
+ if (!aLogList->empty()) {
+ for (auto& line : *aLogList) {
+ request->mResult.AppendElement(NS_ConvertUTF8toUTF16(line.c_str()),
+ fallible);
+ }
+ request->mResult.AppendElement(NS_LITERAL_STRING("+++++++ END ++++++++"),
+ fallible);
+ }
+
+ request->Complete();
+ LogRequest::Delete(aRequestId);
+}
+
+static void GetLogging_s(WebrtcGlobalChild* aThisChild,
+ const int aRequestId,
+ const std::string& aPattern)
+{
+ // Request log while not on the main thread.
+ RLogConnector* logs = RLogConnector::GetInstance();
+ nsAutoPtr<std::deque<std::string>> result(new std::deque<std::string>);
+ // Might not exist yet.
+ if (logs) {
+ logs->Filter(aPattern, 0, result);
+ }
+ // Return to main thread to complete processing.
+ NS_DispatchToMainThread(WrapRunnableNM(&OnGetLogging_m,
+ aThisChild,
+ aRequestId,
+ result),
+ NS_DISPATCH_NORMAL);
+}
+
+static nsresult
+BuildStatsQueryList(
+ const std::map<const std::string, PeerConnectionImpl *>& aPeerConnections,
+ const nsAString& aPcIdFilter,
+ RTCStatsQueries* queries)
+{
+ nsresult rv;
+
+ for (auto&& pc : aPeerConnections) {
+ MOZ_ASSERT(pc.second);
+ if (aPcIdFilter.IsEmpty() ||
+ aPcIdFilter.EqualsASCII(pc.second->GetIdAsAscii().c_str())) {
+ if (pc.second->HasMedia()) {
+ if (!queries->append(nsAutoPtr<RTCStatsQuery>(new RTCStatsQuery(true)))) {
+ return NS_ERROR_OUT_OF_MEMORY;
+ }
+ rv = pc.second->BuildStatsQuery_m(nullptr, queries->back()); // all tracks
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+ MOZ_ASSERT(queries->back()->report);
+ }
+ }
+ }
+
+ return NS_OK;
+}
+
+static nsresult
+RunStatsQuery(
+ const std::map<const std::string, PeerConnectionImpl *>& aPeerConnections,
+ const nsAString& aPcIdFilter,
+ WebrtcGlobalChild* aThisChild,
+ const int aRequestId)
+{
+ nsAutoPtr<RTCStatsQueries> queries(new RTCStatsQueries);
+ nsresult rv = BuildStatsQueryList(aPeerConnections, aPcIdFilter, queries);
+
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+
+ nsCOMPtr<nsIEventTarget> stsThread =
+ do_GetService(NS_SOCKETTRANSPORTSERVICE_CONTRACTID, &rv);
+
+ if (NS_FAILED(rv)) {
+ return rv;
+ } else if (!stsThread) {
+ return NS_ERROR_FAILURE;
+ }
+
+ rv = RUN_ON_THREAD(stsThread,
+ WrapRunnableNM(&GetAllStats_s,
+ aThisChild,
+ aRequestId,
+ queries),
+ NS_DISPATCH_NORMAL);
+ return rv;
+}
+
+void ClearClosedStats()
+{
+ PeerConnectionCtx* ctx = GetPeerConnectionCtx();
+
+ if (ctx) {
+ ctx->mStatsForClosedPeerConnections.Clear();
+ }
+}
+
+void
+WebrtcGlobalInformation::ClearAllStats(
+ const GlobalObject& aGlobal)
+{
+ if (!NS_IsMainThread()) {
+ return;
+ }
+
+ // Chrome-only API
+ MOZ_ASSERT(XRE_IsParentProcess());
+
+ if (!WebrtcContentParents::Empty()) {
+ // Pass on the request to any content process based PeerConnections.
+ for (auto& cp : WebrtcContentParents::GetAll()) {
+ Unused << cp->SendClearStatsRequest();
+ }
+ }
+
+ // Flush the history for the chrome process
+ ClearClosedStats();
+}
+
// Collects stats from all PeerConnections (optionally filtered to one pc
// id). Content processes are queried one at a time via the IPC contact
// list; when none exist (or all have answered), the chrome process' own
// PeerConnections are queried on STS. Results are delivered through
// aStatsCallback on the main thread.
void
WebrtcGlobalInformation::GetAllStats(
  const GlobalObject& aGlobal,
  WebrtcGlobalStatisticsCallback& aStatsCallback,
  const Optional<nsAString>& pcIdFilter,
  ErrorResult& aRv)
{
  if (!NS_IsMainThread()) {
    aRv.Throw(NS_ERROR_NOT_SAME_THREAD);
    return;
  }

  MOZ_ASSERT(XRE_IsParentProcess());

  // CallbackObject does not support threadsafe refcounting, and must be
  // used and destroyed on main.
  StatsRequestCallback callbackHandle(
    new nsMainThreadPtrHolder<WebrtcGlobalStatisticsCallback>(&aStatsCallback));

  nsString filter;
  if (pcIdFilter.WasPassed()) {
    filter = pcIdFilter.Value();
  }

  // The request outlives this call; it is completed/deleted by the IPC
  // result handlers or by the failure paths below.
  auto* request = StatsRequest::Create(callbackHandle, filter);

  if (!request) {
    aRv.Throw(NS_ERROR_FAILURE);
    return;
  }

  if (!WebrtcContentParents::Empty()) {
    // Pass on the request to any content based PeerConnections.
    for (auto& cp : WebrtcContentParents::GetAll()) {
      request->mContactList.push(cp);
    }

    auto next = request->GetNextParent();
    if (next) {
      // NOTE(review): if SendGetStatsRequest fails, the request is not
      // deleted here — confirm whether the StatsRequest leaks on this path.
      aRv = next->SendGetStatsRequest(request->mRequestId, request->mPcIdFilter) ?
              NS_OK : NS_ERROR_FAILURE;
      return;
    }
  }
  // No content resident PeerConnectionCtx instances.
  // Check this process.
  PeerConnectionCtx* ctx = GetPeerConnectionCtx();
  nsresult rv;

  if (ctx) {
    rv = RunStatsQuery(ctx->mGetPeerConnections(),
                       filter, nullptr, request->mRequestId);

    if (NS_FAILED(rv)) {
      StatsRequest::Delete(request->mRequestId);
    }
  } else {
    // Just send back an empty report.
    rv = NS_OK;
    request->Complete();
    StatsRequest::Delete(request->mRequestId);
  }

  aRv = rv;
  return;
}
+
// Dispatches GetLogging_s to the STS thread to collect log entries
// matching aPattern. aThisChild is null in the chrome process; otherwise
// results are routed back through the content child actor.
static nsresult
RunLogQuery(const nsCString& aPattern,
            WebrtcGlobalChild* aThisChild,
            const int aRequestId)
{
  nsresult rv;
  nsCOMPtr<nsIEventTarget> stsThread =
    do_GetService(NS_SOCKETTRANSPORTSERVICE_CONTRACTID, &rv);

  if (NS_FAILED(rv)) {
    return rv;
  } else if (!stsThread) {
    return NS_ERROR_FAILURE;
  }

  rv = RUN_ON_THREAD(stsThread,
                     WrapRunnableNM(&GetLogging_s,
                                    aThisChild,
                                    aRequestId,
                                    aPattern.get()),
                     NS_DISPATCH_NORMAL);
  return rv;
}
+
+static void ClearLogs_s()
+{
+ // Make call off main thread.
+ RLogConnector* logs = RLogConnector::GetInstance();
+ if (logs) {
+ logs->Clear();
+ }
+}
+
// Dispatches ClearLogs_s to the STS thread to wipe this process'
// signaling log buffer.
static nsresult
RunLogClear()
{
  nsresult rv;
  nsCOMPtr<nsIEventTarget> stsThread =
    do_GetService(NS_SOCKETTRANSPORTSERVICE_CONTRACTID, &rv);

  if (NS_FAILED(rv)) {
    return rv;
  }
  if (!stsThread) {
    return NS_ERROR_FAILURE;
  }

  return RUN_ON_THREAD(stsThread,
                       WrapRunnableNM(&ClearLogs_s),
                       NS_DISPATCH_NORMAL);
}
+
// Clears signaling logs in every process: forwards the request to all
// content processes, then clears the chrome process' own log buffer.
// Silently returns if called off the main thread.
void
WebrtcGlobalInformation::ClearLogging(
  const GlobalObject& aGlobal)
{
  if (!NS_IsMainThread()) {
    return;
  }

  // Chrome-only API
  MOZ_ASSERT(XRE_IsParentProcess());

  if (!WebrtcContentParents::Empty()) {
    // Clear content process signaling logs
    for (auto& cp : WebrtcContentParents::GetAll()) {
      Unused << cp->SendClearLogRequest();
    }
  }

  // Clear chrome process signaling logs
  Unused << RunLogClear();
}
+
// Collects signaling log entries matching aPattern from every process,
// walking the content processes one at a time before querying the chrome
// process. Results are delivered through aLoggingCallback on main.
void
WebrtcGlobalInformation::GetLogging(
  const GlobalObject& aGlobal,
  const nsAString& aPattern,
  WebrtcGlobalLoggingCallback& aLoggingCallback,
  ErrorResult& aRv)
{
  if (!NS_IsMainThread()) {
    aRv.Throw(NS_ERROR_NOT_SAME_THREAD);
    return;
  }

  MOZ_ASSERT(XRE_IsParentProcess());

  // CallbackObject does not support threadsafe refcounting, and must be
  // destroyed on main.
  LogRequestCallback callbackHandle(
    new nsMainThreadPtrHolder<WebrtcGlobalLoggingCallback>(&aLoggingCallback));

  // The pattern is carried as UTF-8 from here on.
  nsAutoCString pattern;
  CopyUTF16toUTF8(aPattern, pattern);

  LogRequest* request = LogRequest::Create(callbackHandle, pattern);

  if (!request) {
    aRv.Throw(NS_ERROR_FAILURE);
    return;
  }

  if (!WebrtcContentParents::Empty()) {
    // Pass on the request to any content based PeerConnections.
    for (auto& cp : WebrtcContentParents::GetAll()) {
      request->mContactList.push(cp);
    }

    auto next = request->GetNextParent();
    if (next) {
      // NOTE(review): if SendGetLogRequest fails, the request is not
      // deleted here — confirm whether the LogRequest leaks on this path.
      aRv = next->SendGetLogRequest(request->mRequestId, request->mPattern) ?
              NS_OK : NS_ERROR_FAILURE;
      return;
    }
  }

  // No content processes; query the chrome process directly.
  nsresult rv = RunLogQuery(request->mPattern, nullptr, request->mRequestId);

  if (NS_FAILED(rv)) {
    LogRequest::Delete(request->mRequestId);
  }

  aRv = rv;
  return;
}
+
// Last values pushed through SetDebugLevel/SetAecDebug; read back by
// DebugLevel()/AecDebug().
static int32_t sLastSetLevel = 0;
static bool sLastAECDebug = false;
+
// Sets the webrtc.org trace level in this process (0 stops tracing) and
// mirrors the setting to every content process.
void
WebrtcGlobalInformation::SetDebugLevel(const GlobalObject& aGlobal, int32_t aLevel)
{
  if (aLevel) {
    StartWebRtcLog(webrtc::TraceLevel(aLevel));
  } else {
    StopWebRtcLog();
  }
  sLastSetLevel = aLevel;

  for (auto& cp : WebrtcContentParents::GetAll()){
    Unused << cp->SendSetDebugMode(aLevel);
  }
}
+
// Returns the trace level most recently set via SetDebugLevel.
int32_t
WebrtcGlobalInformation::DebugLevel(const GlobalObject& aGlobal)
{
  return sLastSetLevel;
}
+
// Toggles AEC (echo canceller) debug logging in this process and mirrors
// the setting to every content process.
void
WebrtcGlobalInformation::SetAecDebug(const GlobalObject& aGlobal, bool aEnable)
{
  if (aEnable) {
    StartAecLog();
  } else {
    StopAecLog();
  }

  sLastAECDebug = aEnable;

  for (auto& cp : WebrtcContentParents::GetAll()){
    Unused << cp->SendSetAecLogging(aEnable);
  }
}
+
// Returns the AEC-debug flag most recently set via SetAecDebug.
bool
WebrtcGlobalInformation::AecDebug(const GlobalObject& aGlobal)
{
  return sLastAECDebug;
}
+
// IPC: one content process has returned its stats reports. Accumulates
// them into the pending request, then either queries the next content
// process or finishes with this (chrome) process' own query.
bool
WebrtcGlobalParent::RecvGetStatsResult(const int& aRequestId,
                                       nsTArray<RTCStatsReportInternal>&& Stats)
{
  MOZ_ASSERT(NS_IsMainThread());
  nsresult rv = NS_OK;

  StatsRequest* request = StatsRequest::Get(aRequestId);

  if (!request) {
    CSFLogError(logTag, "Bad RequestId");
    return false;
  }

  for (auto&& s : Stats) {
    // NOTE(review): fallible append with the result ignored — an OOM
    // silently drops this report. Confirm this is intended.
    request->mResult.mReports.Value().AppendElement(s, fallible);
  }

  auto next = request->GetNextParent();
  if (next) {
    // There are more content instances to query.
    return next->SendGetStatsRequest(request->mRequestId, request->mPcIdFilter);
  }

  // Content queries complete, run chrome instance query if applicable
  PeerConnectionCtx* ctx = GetPeerConnectionCtx();

  if (ctx) {
    rv = RunStatsQuery(ctx->mGetPeerConnections(),
                       request->mPcIdFilter, nullptr, aRequestId);
  } else {
    // No instance in the process, return the collections as is
    request->Complete();
    StatsRequest::Delete(aRequestId);
  }

  return NS_SUCCEEDED(rv);
}
+
// IPC: one content process has returned its log lines. Accumulates them
// into the pending request, then either queries the next content process
// or finishes with this (chrome) process' own log query.
bool
WebrtcGlobalParent::RecvGetLogResult(const int& aRequestId,
                                     const WebrtcGlobalLog& aLog)
{
  MOZ_ASSERT(NS_IsMainThread());

  LogRequest* request = LogRequest::Get(aRequestId);

  if (!request) {
    CSFLogError(logTag, "Bad RequestId");
    return false;
  }
  request->mResult.AppendElements(aLog, fallible);

  auto next = request->GetNextParent();
  if (next) {
    // There are more content instances to query.
    return next->SendGetLogRequest(request->mRequestId, request->mPattern);
  }

  // Content queries complete, run chrome instance query if applicable
  nsresult rv = RunLogQuery(request->mPattern, nullptr, aRequestId);

  if (NS_FAILED(rv)) {
    //Unable to get gecko process log. Return what has been collected.
    CSFLogError(logTag, "Unable to extract chrome process log");
    request->Complete();
    LogRequest::Delete(aRequestId);
  }

  return true;
}
+
// IPC allocator hook: delegates actor creation to WebrtcContentParents.
WebrtcGlobalParent*
WebrtcGlobalParent::Alloc()
{
  return WebrtcContentParents::Alloc();
}
+
// IPC deallocator hook: delegates actor release to WebrtcContentParents.
bool
WebrtcGlobalParent::Dealloc(WebrtcGlobalParent * aActor)
{
  WebrtcContentParents::Dealloc(aActor);
  return true;
}
+
+void
+WebrtcGlobalParent::ActorDestroy(ActorDestroyReason aWhy)
+{
+ mShutdown = true;
+ return;
+}
+
// IPC: accept deletion of the actor; nothing extra to tear down here.
bool
WebrtcGlobalParent::Recv__delete__()
{
  return true;
}
+
// Actor starts live; mShutdown flips in ActorDestroy.
MOZ_IMPLICIT WebrtcGlobalParent::WebrtcGlobalParent()
  : mShutdown(false)
{
  MOZ_COUNT_CTOR(WebrtcGlobalParent);
}
+
// Leak-checking bookkeeping only.
MOZ_IMPLICIT WebrtcGlobalParent::~WebrtcGlobalParent()
{
  MOZ_COUNT_DTOR(WebrtcGlobalParent);
}
+
// IPC: chrome process asks this content process for stats. Runs the STS
// stats query if a PeerConnectionCtx exists; otherwise immediately answers
// with an empty report list.
bool
WebrtcGlobalChild::RecvGetStatsRequest(const int& aRequestId,
                                       const nsString& aPcIdFilter)
{
  if (mShutdown) {
    return true;
  }

  PeerConnectionCtx* ctx = GetPeerConnectionCtx();

  if (ctx) {
    nsresult rv = RunStatsQuery(ctx->mGetPeerConnections(),
                                aPcIdFilter, this, aRequestId);
    return NS_SUCCEEDED(rv);
  }

  nsTArray<RTCStatsReportInternal> empty_stats;
  SendGetStatsResult(aRequestId, empty_stats);

  return true;
}
+
// IPC: clear this content process' closed-PeerConnection stats history.
bool
WebrtcGlobalChild::RecvClearStatsRequest()
{
  if (mShutdown) {
    return true;
  }

  ClearClosedStats();
  return true;
}
+
// IPC: chrome process asks this content process for matching log lines.
// Dispatches the query to STS; on any failure answers with an empty log so
// the parent-side request chain can continue.
bool
WebrtcGlobalChild::RecvGetLogRequest(const int& aRequestId,
                                     const nsCString& aPattern)
{
  if (mShutdown) {
    return true;
  }

  nsresult rv;
  nsCOMPtr<nsIEventTarget> stsThread =
    do_GetService(NS_SOCKETTRANSPORTSERVICE_CONTRACTID, &rv);

  if (NS_SUCCEEDED(rv) && stsThread) {
    rv = RUN_ON_THREAD(stsThread,
                       WrapRunnableNM(&GetLogging_s, this, aRequestId, aPattern.get()),
                       NS_DISPATCH_NORMAL);

    if (NS_SUCCEEDED(rv)) {
      return true;
    }
  }

  Sequence<nsString> empty_log;
  SendGetLogResult(aRequestId, empty_log);

  return true;
}
+
// IPC: clear this content process' signaling log buffer (best effort; the
// nsresult from RunLogClear is intentionally ignored).
bool
WebrtcGlobalChild::RecvClearLogRequest()
{
  if (mShutdown) {
    return true;
  }

  RunLogClear();
  return true;
}
+
// IPC: mirror the chrome process' AEC-debug toggle in this process.
bool
WebrtcGlobalChild::RecvSetAecLogging(const bool& aEnable)
{
  if (!mShutdown) {
    if (aEnable) {
      StartAecLog();
    } else {
      StopAecLog();
    }
  }
  return true;
}
+
// IPC: mirror the chrome process' webrtc.org trace level in this process
// (0 stops tracing).
bool
WebrtcGlobalChild::RecvSetDebugMode(const int& aLevel)
{
  if (!mShutdown) {
    if (aLevel) {
      StartWebRtcLog(webrtc::TraceLevel(aLevel));
    } else {
      StopWebRtcLog();
    }
  }
  return true;
}
+
// Constructs the child-side actor by sending the IPC constructor through
// the singleton ContentChild.
WebrtcGlobalChild*
WebrtcGlobalChild::Create()
{
  WebrtcGlobalChild* child =
    static_cast<WebrtcGlobalChild*>(
      ContentChild::GetSingleton()->SendPWebrtcGlobalConstructor());
  return child;
}
+
// IPC teardown notification: mark the actor as no longer usable.
void
WebrtcGlobalChild::ActorDestroy(ActorDestroyReason aWhy)
{
  mShutdown = true;
}
+
// Actor starts live; mShutdown flips in ActorDestroy.
MOZ_IMPLICIT WebrtcGlobalChild::WebrtcGlobalChild()
  : mShutdown(false)
{
  MOZ_COUNT_CTOR(WebrtcGlobalChild);
}
+
// Leak-checking bookkeeping only.
MOZ_IMPLICIT WebrtcGlobalChild::~WebrtcGlobalChild()
{
  MOZ_COUNT_DTOR(WebrtcGlobalChild);
}
+
// Per-stream telemetry accumulator: a bitmask of candidate types seen on
// the stream and whether any candidate pair on it succeeded.
struct StreamResult {
  StreamResult() : candidateTypeBitpattern(0), streamSucceeded(false) {}
  uint32_t candidateTypeBitpattern;
  bool streamSucceeded;
};
+
+static uint32_t GetCandidateIpAndTransportMask(const RTCIceCandidateStats *cand) {
+
+ enum {
+ CANDIDATE_BITMASK_UDP = 1,
+ CANDIDATE_BITMASK_TCP = 1 << 1,
+ CANDIDATE_BITMASK_IPV6 = 1 << 2,
+ };
+
+ uint32_t res = 0;
+
+ nsAutoCString transport;
+ // prefer local transport for local relay candidates
+ if (cand->mMozLocalTransport.WasPassed()) {
+ transport.Assign(NS_ConvertUTF16toUTF8(cand->mMozLocalTransport.Value()));
+ } else {
+ transport.Assign(NS_ConvertUTF16toUTF8(cand->mTransport.Value()));
+ }
+ if (transport == kNrIceTransportUdp) {
+ res |= CANDIDATE_BITMASK_UDP;
+ } else if (transport == kNrIceTransportTcp) {
+ res |= CANDIDATE_BITMASK_TCP;
+ }
+
+ if (cand->mIpAddress.Value().FindChar(':') != -1) {
+ res |= CANDIDATE_BITMASK_IPV6;
+ }
+
+ return res;
+};
+
// Main-thread half of the long-term ICE statistics pipeline: folds the
// finished stats query into Telemetry (per-stream ICE candidate bitmasks,
// per-call encoder/decoder metrics) and appends the report to the
// closed-PeerConnection history. Bails out early if the query failed or
// carries no candidate stats.
static void StoreLongTermICEStatisticsImpl_m(
    nsresult result,
    nsAutoPtr<RTCStatsQuery> query) {

  using namespace Telemetry;

  if (NS_FAILED(result) ||
      !query->error.empty() ||
      !query->report->mIceCandidateStats.WasPassed()) {
    return;
  }

  // Mark the report as belonging to a closed connection before archiving.
  query->report->mClosed.Construct(true);

  // TODO(bcampen@mozilla.com): Do we need to watch out for cases where the
  // components within a stream didn't have the same types of relayed
  // candidates? I have a feeling that late trickle could cause this, but right
  // now we don't have enough information to detect it (we would need to know
  // the ICE component id for each candidate pair and candidate)

  std::map<std::string, StreamResult> streamResults;

  // Build list of streams, and whether or not they failed.
  for (size_t i = 0;
       i < query->report->mIceCandidatePairStats.Value().Length();
       ++i) {
    const RTCIceCandidatePairStats &pair =
      query->report->mIceCandidatePairStats.Value()[i];

    if (!pair.mState.WasPassed() || !pair.mComponentId.WasPassed()) {
      // NOTE(review): MOZ_CRASH is fatal in all build types, which makes the
      // following 'continue' unreachable — confirm whether MOZ_ASSERT (crash
      // on debug, skip on release) was intended here.
      MOZ_CRASH();
      continue;
    }

    // Note: this is not a "component" in the ICE definition, this is really a
    // stream ID. This is just the way the stats API is standardized right now.
    // Very confusing.
    std::string streamId(
      NS_ConvertUTF16toUTF8(pair.mComponentId.Value()).get());

    // A stream succeeds if any of its pairs reached the Succeeded state.
    streamResults[streamId].streamSucceeded |=
      pair.mState.Value() == RTCStatsIceCandidatePairState::Succeeded;
  }

  for (size_t i = 0;
       i < query->report->mIceCandidateStats.Value().Length();
       ++i) {
    const RTCIceCandidateStats &cand =
      query->report->mIceCandidateStats.Value()[i];

    if (!cand.mType.WasPassed() ||
        !cand.mCandidateType.WasPassed() ||
        !cand.mTransport.WasPassed() ||
        !cand.mIpAddress.WasPassed() ||
        !cand.mComponentId.WasPassed()) {
      // Crash on debug, ignore this candidate otherwise.
      // NOTE(review): MOZ_CRASH is fatal in all build types, so this comment
      // does not match the code and 'continue' is unreachable — confirm
      // whether MOZ_ASSERT was intended.
      MOZ_CRASH();
      continue;
    }

    /* The bitmask after examaning a candidate should look like this:
     * REMOTE_GATHERED_HOST_UDP = 1,
     * REMOTE_GATHERED_HOST_TCP = 1 << 1,
     * REMOTE_GATHERED_HOST_IPV6 = 1 << 2,
     * REMOTE_GATHERED_SERVER_REFLEXIVE_UDP = 1 << 3,
     * REMOTE_GATHERED_SERVER_REFLEXIVE_TCP = 1 << 4,
     * REMOTE_GATHERED_SERVER_REFLEXIVE_IPV6 = 1 << 5,
     * REMOTE_GATHERED_TURN_UDP = 1 << 6,
     * REMOTE_GATHERED_TURN_TCP = 1 << 7, // dummy place holder
     * REMOTE_GATHERED_TURN_IPV6 = 1 << 8,
     * REMOTE_GATHERED_PEER_REFLEXIVE_UDP = 1 << 9,
     * REMOTE_GATHERED_PEER_REFLEXIVE_TCP = 1 << 10,
     * REMOTE_GATHERED_PEER_REFLEXIVE_IPV6 = 1 << 11,
     * LOCAL_GATHERED_HOST_UDP = 1 << 16,
     * LOCAL_GATHERED_HOST_TCP = 1 << 17,
     * LOCAL_GATHERED_HOST_IPV6 = 1 << 18,
     * LOCAL_GATHERED_SERVER_REFLEXIVE_UDP = 1 << 19,
     * LOCAL_GATHERED_SERVER_REFLEXIVE_TCP = 1 << 20,
     * LOCAL_GATHERED_SERVER_REFLEXIVE_IPV6 = 1 << 21,
     * LOCAL_GATHERED_TURN_UDP = 1 << 22,
     * LOCAL_GATHERED_TURN_TCP = 1 << 23,
     * LOCAL_GATHERED_TURN_IPV6 = 1 << 24,
     * LOCAL_GATHERED_PEERREFLEXIVE_UDP = 1 << 25,
     * LOCAL_GATHERED_PEERREFLEXIVE_TCP = 1 << 26,
     * LOCAL_GATHERED_PEERREFLEXIVE_IPV6 = 1 << 27,
     *
     * This results in following shift values
     */
    static const uint32_t kLocalShift = 16;
    static const uint32_t kSrflxShift = 3;
    static const uint32_t kRelayShift = 6;
    static const uint32_t kPrflxShift = 9;

    uint32_t candBitmask = GetCandidateIpAndTransportMask(&cand);

    // Note: shift values need to result in the above enum table
    if (cand.mType.Value() == RTCStatsType::Localcandidate) {
      candBitmask <<= kLocalShift;
    }

    if (cand.mCandidateType.Value() == RTCStatsIceCandidateType::Serverreflexive) {
      candBitmask <<= kSrflxShift;
    } else if (cand.mCandidateType.Value() == RTCStatsIceCandidateType::Relayed) {
      candBitmask <<= kRelayShift;
    } else if (cand.mCandidateType.Value() == RTCStatsIceCandidateType::Peerreflexive) {
      candBitmask <<= kPrflxShift;
    }

    // Note: this is not a "component" in the ICE definition, this is really a
    // stream ID. This is just the way the stats API is standardized right now.
    // Very confusing.
    std::string streamId(
      NS_ConvertUTF16toUTF8(cand.mComponentId.Value()).get());

    streamResults[streamId].candidateTypeBitpattern |= candBitmask;
  }

  // One telemetry record per stream.
  for (auto i = streamResults.begin(); i != streamResults.end(); ++i) {
    Telemetry::RecordWebrtcIceCandidates(i->second.candidateTypeBitpattern,
                                         i->second.streamSucceeded);
  }

  // Beyond ICE, accumulate telemetry for various PER_CALL settings here.

  if (query->report->mOutboundRTPStreamStats.WasPassed()) {
    auto& array = query->report->mOutboundRTPStreamStats.Value();
    for (decltype(array.Length()) i = 0; i < array.Length(); i++) {
      auto& s = array[i];
      // Only local video send streams contribute to encoder telemetry.
      bool isVideo = (s.mId.Value().Find("video") != -1);
      if (!isVideo || s.mIsRemote) {
        continue;
      }
      if (s.mBitrateMean.WasPassed()) {
        Accumulate(WEBRTC_VIDEO_ENCODER_BITRATE_AVG_PER_CALL_KBPS,
                   uint32_t(s.mBitrateMean.Value() / 1000));
      }
      if (s.mBitrateStdDev.WasPassed()) {
        Accumulate(WEBRTC_VIDEO_ENCODER_BITRATE_STD_DEV_PER_CALL_KBPS,
                   uint32_t(s.mBitrateStdDev.Value() / 1000));
      }
      if (s.mFramerateMean.WasPassed()) {
        Accumulate(WEBRTC_VIDEO_ENCODER_FRAMERATE_AVG_PER_CALL,
                   uint32_t(s.mFramerateMean.Value()));
      }
      if (s.mFramerateStdDev.WasPassed()) {
        Accumulate(WEBRTC_VIDEO_ENCODER_FRAMERATE_10X_STD_DEV_PER_CALL,
                   uint32_t(s.mFramerateStdDev.Value() * 10));
      }
      if (s.mDroppedFrames.WasPassed() && !query->iceStartTime.IsNull()) {
        // Normalize dropped frames to a per-minute rate over the call.
        double mins = (TimeStamp::Now() - query->iceStartTime).ToSeconds() / 60;
        if (mins > 0) {
          Accumulate(WEBRTC_VIDEO_ENCODER_DROPPED_FRAMES_PER_CALL_FPM,
                     uint32_t(double(s.mDroppedFrames.Value()) / mins));
        }
      }
    }
  }

  if (query->report->mInboundRTPStreamStats.WasPassed()) {
    auto& array = query->report->mInboundRTPStreamStats.Value();
    for (decltype(array.Length()) i = 0; i < array.Length(); i++) {
      auto& s = array[i];
      // Only local video receive streams contribute to decoder telemetry.
      bool isVideo = (s.mId.Value().Find("video") != -1);
      if (!isVideo || s.mIsRemote) {
        continue;
      }
      if (s.mBitrateMean.WasPassed()) {
        Accumulate(WEBRTC_VIDEO_DECODER_BITRATE_AVG_PER_CALL_KBPS,
                   uint32_t(s.mBitrateMean.Value() / 1000));
      }
      if (s.mBitrateStdDev.WasPassed()) {
        Accumulate(WEBRTC_VIDEO_DECODER_BITRATE_STD_DEV_PER_CALL_KBPS,
                   uint32_t(s.mBitrateStdDev.Value() / 1000));
      }
      if (s.mFramerateMean.WasPassed()) {
        Accumulate(WEBRTC_VIDEO_DECODER_FRAMERATE_AVG_PER_CALL,
                   uint32_t(s.mFramerateMean.Value()));
      }
      if (s.mFramerateStdDev.WasPassed()) {
        Accumulate(WEBRTC_VIDEO_DECODER_FRAMERATE_10X_STD_DEV_PER_CALL,
                   uint32_t(s.mFramerateStdDev.Value() * 10));
      }
      if (s.mDiscardedPackets.WasPassed() && !query->iceStartTime.IsNull()) {
        // Normalize discarded packets to a per-minute rate over the call.
        double mins = (TimeStamp::Now() - query->iceStartTime).ToSeconds() / 60;
        if (mins > 0) {
          Accumulate(WEBRTC_VIDEO_DECODER_DISCARDED_PACKETS_PER_CALL_PPM,
                     uint32_t(double(s.mDiscardedPackets.Value()) / mins));
        }
      }
    }
  }

  // Finally, store the stats

  PeerConnectionCtx *ctx = GetPeerConnectionCtx();
  if (ctx) {
    ctx->mStatsForClosedPeerConnections.AppendElement(*query->report, fallible);
  }
}
+
// STS-thread half of the long-term ICE statistics pipeline: executes the
// stats query, records STUN rate-limit telemetry (which must be sampled on
// STS), then hands the query back to main for processing and storage.
static void GetStatsForLongTermStorage_s(
    nsAutoPtr<RTCStatsQuery> query) {

  MOZ_ASSERT(query);

  nsresult rv = PeerConnectionImpl::ExecuteStatsQuery_s(query.get());

  // Check whether packets were dropped due to rate limiting during
  // this call. (These calls must be made on STS)
  // Bit 0: short-term limit exceeded; bit 1: long-term limit exceeded.
  unsigned char rate_limit_bit_pattern = 0;
  if (!mozilla::nr_socket_short_term_violation_time().IsNull() &&
      !query->iceStartTime.IsNull() &&
      mozilla::nr_socket_short_term_violation_time() >= query->iceStartTime) {
    rate_limit_bit_pattern |= 1;
  }
  if (!mozilla::nr_socket_long_term_violation_time().IsNull() &&
      !query->iceStartTime.IsNull() &&
      mozilla::nr_socket_long_term_violation_time() >= query->iceStartTime) {
    rate_limit_bit_pattern |= 2;
  }

  if (query->failed) {
    Telemetry::Accumulate(
        Telemetry::WEBRTC_STUN_RATE_LIMIT_EXCEEDED_BY_TYPE_GIVEN_FAILURE,
        rate_limit_bit_pattern);
  } else {
    Telemetry::Accumulate(
        Telemetry::WEBRTC_STUN_RATE_LIMIT_EXCEEDED_BY_TYPE_GIVEN_SUCCESS,
        rate_limit_bit_pattern);
  }

  // Even if Telemetry::Accumulate is threadsafe, we still need to send the
  // query back to main, since that is where it must be destroyed.
  NS_DispatchToMainThread(
      WrapRunnableNM(
          &StoreLongTermICEStatisticsImpl_m,
          rv,
          query),
      NS_DISPATCH_NORMAL);
}
+
// Entry point for recording final ICE telemetry as a PeerConnection is
// torn down: records the final connection state, then (if ICE ever
// started) kicks a stats query to STS for processing and archival.
void WebrtcGlobalInformation::StoreLongTermICEStatistics(
    PeerConnectionImpl& aPc) {
  Telemetry::Accumulate(Telemetry::WEBRTC_ICE_FINAL_CONNECTION_STATE,
                        static_cast<uint32_t>(aPc.IceConnectionState()));

  if (aPc.IceConnectionState() == PCImplIceConnectionState::New) {
    // ICE has not started; we won't have any remote candidates, so recording
    // statistics on gathered candidates is pointless.
    return;
  }

  // "internal stats" query (true); covers all tracks.
  nsAutoPtr<RTCStatsQuery> query(new RTCStatsQuery(true));

  nsresult rv = aPc.BuildStatsQuery_m(nullptr, query.get());

  NS_ENSURE_SUCCESS_VOID(rv);

  RUN_ON_THREAD(aPc.GetSTSThread(),
                WrapRunnableNM(&GetStatsForLongTermStorage_s,
                               query),
                NS_DISPATCH_NORMAL);
}
+
+} // namespace dom
+} // namespace mozilla
diff --git a/media/webrtc/signaling/src/peerconnection/WebrtcGlobalInformation.h b/media/webrtc/signaling/src/peerconnection/WebrtcGlobalInformation.h
new file mode 100644
index 000000000..fb3789c20
--- /dev/null
+++ b/media/webrtc/signaling/src/peerconnection/WebrtcGlobalInformation.h
@@ -0,0 +1,56 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _WEBRTC_GLOBAL_INFORMATION_H_
+#define _WEBRTC_GLOBAL_INFORMATION_H_
+
+#include "nsString.h"
+#include "mozilla/dom/BindingDeclarations.h" // for Optional
+
+namespace mozilla {
+class PeerConnectionImpl;
+class ErrorResult;
+
+namespace dom {
+
+class GlobalObject;
+class WebrtcGlobalStatisticsCallback;
+class WebrtcGlobalLoggingCallback;
+
// Static-only entry points backing the chrome-only WebrtcGlobalInformation
// WebIDL interface: cross-process stats/log collection and debug/AEC
// logging toggles.
class WebrtcGlobalInformation
{
public:
  // Collects stats from every PeerConnection (optionally filtered to the
  // pc id in pcIdFilter) and delivers them through aStatsCallback.
  static void GetAllStats(const GlobalObject& aGlobal,
                          WebrtcGlobalStatisticsCallback& aStatsCallback,
                          const Optional<nsAString>& pcIdFilter,
                          ErrorResult& aRv);

  // Drops the stats retained for already-closed PeerConnections.
  static void ClearAllStats(const GlobalObject& aGlobal);

  // Collects signaling log entries matching aPattern and delivers them
  // through aLoggingCallback.
  static void GetLogging(const GlobalObject& aGlobal,
                         const nsAString& aPattern,
                         WebrtcGlobalLoggingCallback& aLoggingCallback,
                         ErrorResult& aRv);

  // Clears signaling logs in all processes.
  static void ClearLogging(const GlobalObject& aGlobal);

  // webrtc.org trace level; 0 disables tracing. DebugLevel returns the
  // last value set.
  static void SetDebugLevel(const GlobalObject& aGlobal, int32_t aLevel);
  static int32_t DebugLevel(const GlobalObject& aGlobal);

  // AEC debug logging toggle; AecDebug returns the last value set.
  static void SetAecDebug(const GlobalObject& aGlobal, bool aEnable);
  static bool AecDebug(const GlobalObject& aGlobal);

  // Records final ICE telemetry for a PeerConnection being torn down.
  static void StoreLongTermICEStatistics(PeerConnectionImpl& aPc);

private:
  // Not instantiable: every member is static.
  WebrtcGlobalInformation() = delete;
  WebrtcGlobalInformation(const WebrtcGlobalInformation& aOrig) = delete;
  WebrtcGlobalInformation& operator=(
    const WebrtcGlobalInformation& aRhs) = delete;
};
+
+} // namespace dom
+} // namespace mozilla
+
+#endif // _WEBRTC_GLOBAL_INFORMATION_H_
diff --git a/media/webrtc/signaling/src/peerconnection/WebrtcGlobalParent.h b/media/webrtc/signaling/src/peerconnection/WebrtcGlobalParent.h
new file mode 100644
index 000000000..4e2d0509f
--- /dev/null
+++ b/media/webrtc/signaling/src/peerconnection/WebrtcGlobalParent.h
@@ -0,0 +1,53 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _WEBRTC_GLOBAL_PARENT_H_
+#define _WEBRTC_GLOBAL_PARENT_H_
+
+#include "mozilla/dom/PWebrtcGlobalParent.h"
+#include "mozilla/dom/RTCStatsReportBinding.h"
+#include "mozilla/dom/BindingDeclarations.h"
+#include "nsISupportsImpl.h"
+
+namespace mozilla {
+namespace dom {
+
+class WebrtcParents;
+
// Parent-side (chrome process) actor of the PWebrtcGlobal IPC protocol.
// Receives stats and log results from content processes; lifetime is
// refcounted and coordinated via WebrtcContentParents.
class WebrtcGlobalParent
  : public PWebrtcGlobalParent
{
  friend class ContentParent;
  friend class WebrtcGlobalInformation;
  friend class WebrtcContentParents;

  // Set in ActorDestroy; the actor must not be used once true.
  bool mShutdown;

  MOZ_IMPLICIT WebrtcGlobalParent();

  // IPC allocation hooks (delegated to WebrtcContentParents).
  static WebrtcGlobalParent* Alloc();
  static bool Dealloc(WebrtcGlobalParent* aActor);

  // IPC message handlers: results returned by a content process.
  virtual bool RecvGetStatsResult(const int& aRequestId,
                                  nsTArray<RTCStatsReportInternal>&& aStats) override;
  virtual bool RecvGetLogResult(const int& aRequestId,
                                const WebrtcGlobalLog& aLog) override;

  virtual void ActorDestroy(ActorDestroyReason aWhy) override;
  virtual bool Recv__delete__() override;

  virtual ~WebrtcGlobalParent();
public:
  NS_INLINE_DECL_REFCOUNTING(WebrtcGlobalParent)

  // True until the IPC channel is torn down.
  bool IsActive()
  {
    return !mShutdown;
  }
};
+
+} // namespace dom
+} // namespace mozilla
+
+#endif // _WEBRTC_GLOBAL_PARENT_H_
diff --git a/media/webrtc/signaling/src/sdp/Sdp.h b/media/webrtc/signaling/src/sdp/Sdp.h
new file mode 100644
index 000000000..8eeb89e2f
--- /dev/null
+++ b/media/webrtc/signaling/src/sdp/Sdp.h
@@ -0,0 +1,195 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+
+ ,-----. ,--. ,--.
+ ' .--./ ,--,--.,--.,--.,-' '-.`--' ,---. ,--,--,
+ | | ' ,-. || || |'-. .-',--.| .-. || `
+ ' '--'\\ '-' |' '' ' | | | |' '-' '| || |
+ `-----' `--`--' `----' `--' `--' `---' `--''--'
+
+ :+o+-
+ -dNNNNNd.
+ yNNNNNNNs
+ :mNNNNNm-
+ `/sso/``-://-
+ .:+sydNNNNNNms: `://`
+ `-/+shmNNNNNNNNNNNNNNNms- :mNNNm/
+ `-/oydmNNNNNNNNNNNNNNNNNNNNNNNNdo- +NNNNNN+
+ .shmNNNNNNNNNNNmdyo/:dNNNNNNNNNNNNNNNNdo. `sNNNNNm+
+ hNNNNNNNNmhs+:-` .dNNNNNNNNNNNNNNNNNNNNh+-` `hNNNNNm:
+ -yddyo/:. -dNNNNm::ymNNNNNNNNNNNNNNNmdy+/dNNNNNd.
+ :mNNNNd. `/ymNNNNNNNNNNNNNNNNNNNNNNh`
+ +NNNNNh` `+hNNNNNNNNNNNNNNNNNNNs
+ sNNNNNy` .yNNNNNm`-/oymNNNm+
+ `yNNNNNo oNNNNNm` `-.
+ .dNNNNm/ oNNNNNm`
+ oNNNNm: +NNNNNm`
+ `+yho. +NNNNNm`
+ +NNNNNNs.
+ `yNNNNNNmy-
+ -smNNNNNNh:
+ .smNNNNNNh/
+ `omNNNNNNd:
+ `+dNNNNNd
+ ````......```` /hmdy-
+ `.:/+osyhddmNNMMMMMMMMMMMMMMMMMMMMNNmddhyso+/:.`
+ `-+shmNMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMNmhs+-`
+ -smMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMds-
+ hMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMh
+ yMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMs
+ .ohNMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMNh+.
+ ./oydmMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMmhyo:.
+ `.:/+osyyhddmmNNMMMMMMMMMMMMMMNNmmddhyyso+/:.`
+
+ ,--------.,--. ,--. ,--.
+ '--. .--'| ,---. `--' ,---. | | ,---.
+ | | | .-. |,--.( .-' | |( .-'
+ | | | | | || |.-' `) | |.-' `)
+ `--' `--' `--'`--'`----' `--'`----'
+ ,--.
+ ,---. ,------. ,------. ,--. | |
+ ' .-' | .-. \ | .--. ' ,--,--.,--.--.,-' '-. ,--,--.| |
+ `. `-. | | \ :| '--' |' ,-. || .--''-. .-'' ,-. || |
+ .-' || '--' /| | --' \ '-' || | | | \ '-' |`--'
+ `-----' `-------' `--' `--`--'`--' `--' `--`--'.--.
+ '__'
+*/
+
+#ifndef _SDP_H_
+#define _SDP_H_
+
+#include <ostream>
+#include <vector>
+#include <sstream>
+#include "mozilla/UniquePtr.h"
+#include "mozilla/Maybe.h"
+#include "signaling/src/sdp/SdpMediaSection.h"
+#include "signaling/src/sdp/SdpAttributeList.h"
+#include "signaling/src/sdp/SdpEnum.h"
+
+namespace mozilla
+{
+
+class SdpOrigin;
+class SdpEncryptionKey;
+class SdpMediaSection;
+
+/**
+ * Base class for an SDP
+ */
+class Sdp
+{
+public:
+ Sdp(){};
+ virtual ~Sdp(){};
+
+ virtual const SdpOrigin& GetOrigin() const = 0;
+ // Note: connection information is always retrieved from media sections
+ virtual uint32_t GetBandwidth(const std::string& type) const = 0;
+
+ virtual const SdpAttributeList& GetAttributeList() const = 0;
+ virtual SdpAttributeList& GetAttributeList() = 0;
+
+ virtual size_t GetMediaSectionCount() const = 0;
+ virtual const SdpMediaSection& GetMediaSection(size_t level) const = 0;
+ virtual SdpMediaSection& GetMediaSection(size_t level) = 0;
+
+ virtual SdpMediaSection& AddMediaSection(SdpMediaSection::MediaType media,
+ SdpDirectionAttribute::Direction dir,
+ uint16_t port,
+ SdpMediaSection::Protocol proto,
+ sdp::AddrType addrType,
+ const std::string& addr) = 0;
+
+ virtual void Serialize(std::ostream&) const = 0;
+
+ std::string ToString() const;
+};
+
// Streams an SDP by delegating to its Serialize implementation.
inline std::ostream& operator<<(std::ostream& os, const Sdp& sdp)
{
  sdp.Serialize(os);
  return os;
}
+
+inline std::string
+Sdp::ToString() const
+{
+ std::stringstream s;
+ s << *this;
+ return s.str();
+}
+
// Holds the fields of an SDP "o=" (origin) line, in the order they are
// serialized: username, session id, session version, address type, address.
class SdpOrigin
{
public:
  SdpOrigin(const std::string& username, uint64_t sessId, uint64_t sessVer,
            sdp::AddrType addrType, const std::string& addr)
      : mUsername(username),
        mSessionId(sessId),
        mSessionVersion(sessVer),
        mAddrType(addrType),
        mAddress(addr)
  {
  }

  const std::string&
  GetUsername() const
  {
    return mUsername;
  }

  uint64_t
  GetSessionId() const
  {
    return mSessionId;
  }

  uint64_t
  GetSessionVersion() const
  {
    return mSessionVersion;
  }

  sdp::AddrType
  GetAddrType() const
  {
    return mAddrType;
  }

  const std::string&
  GetAddress() const
  {
    return mAddress;
  }

  // Emits the "o=" line; the network type is always "IN" (internet).
  void
  Serialize(std::ostream& os) const
  {
    sdp::NetType netType = sdp::kInternet;
    os << "o=" << mUsername << " " << mSessionId << " " << mSessionVersion
       << " " << netType << " " << mAddrType << " " << mAddress << "\r\n";
  }

private:
  std::string mUsername;
  uint64_t mSessionId;
  uint64_t mSessionVersion;
  sdp::AddrType mAddrType;
  std::string mAddress;
};
+
// Streams an origin by delegating to its Serialize implementation.
inline std::ostream& operator<<(std::ostream& os, const SdpOrigin& origin)
{
  origin.Serialize(os);
  return os;
}
+
+} // namespace mozilla
+
+#endif
diff --git a/media/webrtc/signaling/src/sdp/SdpAttribute.cpp b/media/webrtc/signaling/src/sdp/SdpAttribute.cpp
new file mode 100644
index 000000000..06fc94dbb
--- /dev/null
+++ b/media/webrtc/signaling/src/sdp/SdpAttribute.cpp
@@ -0,0 +1,1674 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "signaling/src/sdp/SdpAttribute.h"
+#include "signaling/src/sdp/SdpHelper.h"
+#include <iomanip>
+
+#ifdef CRLF
+#undef CRLF
+#endif
+#define CRLF "\r\n"
+
+namespace mozilla
+{
+
+// Low-level stream helpers shared by all the attribute parsers below.
+// Convention: on failure they set *error to a human-readable message and
+// return a falsy value; the stream is left positioned at the failing char.
+
+// Peeks the next character without consuming it. Returns 0 (and sets *error)
+// at end-of-stream; callers rely on 0 never being a valid SDP token char.
+static unsigned char
+PeekChar(std::istream& is, std::string* error)
+{
+  int next = is.peek();
+  if (next == EOF) {
+    *error = "Truncated";
+    return 0;
+  }
+
+  return next;
+}
+
+// Consumes characters up to (not including) the first delimiter in |delims|
+// or end-of-stream, lowercasing as it goes. May return an empty token.
+static std::string ParseToken(std::istream& is,
+                              const std::string& delims,
+                              std::string* error)
+{
+  std::string token;
+  while (is) {
+    unsigned char c = PeekChar(is, error);
+    if (!c || (delims.find(c) != std::string::npos)) {
+      break;
+    }
+    token.push_back(std::tolower(is.get()));
+  }
+  return token;
+}
+
+// Consumes |c| if it is the next character; otherwise leaves the stream
+// untouched, records "Expected 'c'" in *error, and returns false.
+static bool
+SkipChar(std::istream& is, unsigned char c, std::string* error)
+{
+  if (PeekChar(is, error) != c) {
+    *error = "Expected \'";
+    error->push_back(c);
+    error->push_back('\'');
+    return false;
+  }
+
+  is.get();
+  return true;
+}
+
+
+// a=connection:<value>
+void
+SdpConnectionAttribute::Serialize(std::ostream& os) const
+{
+  os << "a=" << mType << ":" << mValue << CRLF;
+}
+
+// Direction attributes serialize as bare flags (a=sendrecv, a=recvonly, ...),
+// so only the value (not mType) is written.
+void
+SdpDirectionAttribute::Serialize(std::ostream& os) const
+{
+  os << "a=" << mValue << CRLF;
+}
+
+// a=dtls-message:<client|server> <base64 value>
+void
+SdpDtlsMessageAttribute::Serialize(std::ostream& os) const
+{
+  os << "a=" << mType << ":" << mRole << " " << mValue << CRLF;
+}
+
+// Parses "<client|server> <value>"; the value is the raw remainder of the
+// stream (whitespace-trimmed on the left only).
+bool
+SdpDtlsMessageAttribute::Parse(std::istream& is, std::string* error)
+{
+  std::string roleToken = ParseToken(is, " ", error);
+  if (roleToken == "server") {
+    mRole = kServer;
+  } else if (roleToken == "client") {
+    mRole = kClient;
+  } else {
+    *error = "Invalid dtls-message role; must be either client or server";
+    return false;
+  }
+
+  is >> std::ws;
+
+  // Slurp everything left in the stream as the opaque message payload.
+  std::string s(std::istreambuf_iterator<char>(is), {});
+  mValue = s;
+
+  return true;
+}
+
+// a=extmap:<entry>[/<direction>] <extensionname> [<extensionattributes>]
+void
+SdpExtmapAttributeList::Serialize(std::ostream& os) const
+{
+  for (auto i = mExtmaps.begin(); i != mExtmaps.end(); ++i) {
+    os << "a=" << mType << ":" << i->entry;
+    if (i->direction_specified) {
+      os << "/" << i->direction;
+    }
+    os << " " << i->extensionname;
+    if (i->extensionattributes.length()) {
+      os << " " << i->extensionattributes;
+    }
+    os << CRLF;
+  }
+}
+
+// a=fingerprint:<hash-func> <uppercase hex bytes joined by ':'>
+void
+SdpFingerprintAttributeList::Serialize(std::ostream& os) const
+{
+  for (auto i = mFingerprints.begin(); i != mFingerprints.end(); ++i) {
+    os << "a=" << mType << ":" << i->hashFunc << " "
+       << FormatFingerprint(i->fingerprint) << CRLF;
+  }
+}
+
+// Format the fingerprint in RFC 4572 Section 5 attribute format
+std::string
+SdpFingerprintAttributeList::FormatFingerprint(const std::vector<uint8_t>& fp)
+{
+  if (fp.empty()) {
+    MOZ_ASSERT(false, "Cannot format an empty fingerprint.");
+    return "";
+  }
+
+  std::ostringstream os;
+  // Emit ":XX" for each byte, then drop the leading ':' with substr(1).
+  for (auto i = fp.begin(); i != fp.end(); ++i) {
+    os << ":" << std::hex << std::uppercase << std::setw(2) << std::setfill('0')
+       << static_cast<uint32_t>(*i);
+  }
+  return os.str().substr(1);
+}
+
+// Decodes one uppercase hex digit; returns 16 as the out-of-band "invalid"
+// marker (callers test for > 0xf). Lowercase digits are deliberately invalid
+// per the RFC 4572 grammar.
+static uint8_t
+FromUppercaseHex(char ch)
+{
+  if ((ch >= '0') && (ch <= '9')) {
+    return ch - '0';
+  }
+  if ((ch >= 'A') && (ch <= 'F')) {
+    return ch - 'A' + 10;
+  }
+  return 16; // invalid
+}
+
+// Parse the fingerprint from RFC 4572 Section 5 attribute format
+std::vector<uint8_t>
+SdpFingerprintAttributeList::ParseFingerprint(const std::string& str)
+{
+  // Each byte occupies 3 chars ("XX:") except the last ("XX"), so a valid
+  // string has length 3k+2 and holds (length+1)/3 bytes.
+  size_t targetSize = (str.length() + 1) / 3;
+  std::vector<uint8_t> fp(targetSize);
+  size_t fpIndex = 0;
+
+  if (str.length() % 3 != 2) {
+    fp.clear();
+    return fp;
+  }
+
+  for (size_t i = 0; i < str.length(); i += 3) {
+    uint8_t high = FromUppercaseHex(str[i]);
+    uint8_t low = FromUppercaseHex(str[i + 1]);
+    if (high > 0xf || low > 0xf ||
+        (i + 2 < str.length() && str[i + 2] != ':')) {
+      fp.clear(); // error
+      return fp;
+    }
+    fp[fpIndex++] = high << 4 | low;
+  }
+  return fp;
+}
+
+// a=fmtp:<format> <parameters>; entries with no parameter object are
+// silently skipped (there is nothing meaningful to emit for them).
+void
+SdpFmtpAttributeList::Serialize(std::ostream& os) const
+{
+  for (auto i = mFmtps.begin(); i != mFmtps.end(); ++i) {
+    if (i->parameters) {
+      os << "a=" << mType << ":" << i->format << " ";
+      i->parameters->Serialize(os);
+      os << CRLF;
+    }
+  }
+}
+
+// a=group:<semantics> <tag> <tag> ...
+void
+SdpGroupAttributeList::Serialize(std::ostream& os) const
+{
+  for (auto i = mGroups.begin(); i != mGroups.end(); ++i) {
+    os << "a=" << mType << ":" << i->semantics;
+    for (auto j = i->tags.begin(); j != i->tags.end(); ++j) {
+      os << " " << (*j);
+    }
+    os << CRLF;
+  }
+}
+
+// We're just using an SdpStringAttribute for this right now
+#if 0
+void SdpIdentityAttribute::Serialize(std::ostream& os) const
+{
+ os << "a=" << mType << ":" << mAssertion;
+ for (auto i = mExtensions.begin(); i != mExtensions.end(); i++) {
+ os << (i == mExtensions.begin() ? " " : ";") << (*i);
+ }
+ os << CRLF;
+}
+#endif
+
+// Class to help with omitting a leading delimiter for the first item in a list
+// Usage: stream "os << delim << item" for every item; the first insertion
+// emits nothing and every later one emits the delimiter.
+class SkipFirstDelimiter
+{
+  public:
+    explicit SkipFirstDelimiter(const std::string& delim) :
+      mDelim(delim),
+      mFirst(true)
+    {}
+
+    std::ostream& print(std::ostream& os)
+    {
+      if (!mFirst) {
+        os << mDelim;
+      }
+      mFirst = false;
+      return os;
+    }
+
+  private:
+    std::string mDelim;
+    bool mFirst;
+};
+
+// Non-const reference: each insertion mutates the delimiter's "first" state.
+static std::ostream& operator<<(std::ostream& os, SkipFirstDelimiter& delim)
+{
+  return delim.print(os);
+}
+
+// Serializes an imageattr x/y range (RFC 6236): either "[min:max]",
+// "[min:step:max]" (step omitted when 1), a single value, or a discrete
+// list "[v1,v2,...]".
+void
+SdpImageattrAttributeList::XYRange::Serialize(std::ostream& os) const
+{
+  if (discreteValues.size() == 0) {
+    os << "[" << min << ":";
+    if (step != 1) {
+      os << step << ":";
+    }
+    os << max << "]";
+  } else if (discreteValues.size() == 1) {
+    os << discreteValues.front();
+  } else {
+    os << "[";
+    SkipFirstDelimiter comma(",");
+    for (auto value : discreteValues) {
+      os << comma << value;
+    }
+    os << "]";
+  }
+}
+
+// Extracts a non-negative numeric value in [min, max] from the stream.
+// Despite the name this is also instantiated with float (see GetSPValue /
+// GetQValue below); the leading '-' check rejects negatives up front because
+// operator>> would otherwise accept them.
+template<typename T>
+bool
+GetUnsigned(std::istream& is, T min, T max, T* value, std::string* error)
+{
+  if (PeekChar(is, error) == '-') {
+    *error = "Value is less than 0";
+    return false;
+  }
+
+  is >> std::noskipws >> *value;
+
+  if (is.fail()) {
+    *error = "Malformed";
+    return false;
+  }
+
+  if (*value < min) {
+    *error = "Value too small";
+    return false;
+  }
+
+  if (*value > max) {
+    *error = "Value too large";
+    return false;
+  }
+
+  return true;
+}
+
+// imageattr x/y values are constrained to 1..999999 by RFC 6236.
+static bool
+GetXYValue(std::istream& is, uint32_t* value, std::string* error)
+{
+  return GetUnsigned<uint32_t>(is, 1, 999999, value, error);
+}
+
+// Parses the tail of a discrete-value list "v2,v3,...]" (the '[' and first
+// value/comma were consumed by ParseAfterBracket).
+bool
+SdpImageattrAttributeList::XYRange::ParseDiscreteValues(std::istream& is,
+                                                        std::string* error)
+{
+  do {
+    uint32_t value;
+    if (!GetXYValue(is, &value, error)) {
+      return false;
+    }
+    discreteValues.push_back(value);
+  } while (SkipChar(is, ',', error));
+
+  return SkipChar(is, ']', error);
+}
+
+bool
+SdpImageattrAttributeList::XYRange::ParseAfterMin(std::istream& is,
+                                                  std::string* error)
+{
+  // We have already parsed "[320:", and now expect another uint
+  uint32_t value;
+  if (!GetXYValue(is, &value, error)) {
+    return false;
+  }
+
+  if (SkipChar(is, ':', error)) {
+    // Range with step eg [320:16:640]
+    step = value;
+    // Now |value| should be the max
+    if (!GetXYValue(is, &value, error)) {
+      return false;
+    }
+  }
+
+  max = value;
+  if (min >= max) {
+    *error = "Min is not smaller than max";
+    return false;
+  }
+
+  return SkipChar(is, ']', error);
+}
+
+bool
+SdpImageattrAttributeList::XYRange::ParseAfterBracket(std::istream& is,
+                                                      std::string* error)
+{
+  // Either a range, or a list of discrete values
+  // [320:640], [320:16:640], or [320,640]
+  uint32_t value;
+  if (!GetXYValue(is, &value, error)) {
+    return false;
+  }
+
+  if (SkipChar(is, ':', error)) {
+    // Range - [640:480] or [640:16:480]
+    min = value;
+    return ParseAfterMin(is, error);
+  }
+
+  if (SkipChar(is, ',', error)) {
+    discreteValues.push_back(value);
+    return ParseDiscreteValues(is, error);
+  }
+
+  *error = "Expected \':\' or \',\'";
+  return false;
+}
+
+// Entry point: a bare value is stored as a single discrete value, anything
+// bracketed is dispatched to ParseAfterBracket.
+bool
+SdpImageattrAttributeList::XYRange::Parse(std::istream& is, std::string* error)
+{
+  if (SkipChar(is, '[', error)) {
+    return ParseAfterBracket(is, error);
+  }
+
+  // Single discrete value
+  uint32_t value;
+  if (!GetXYValue(is, &value, error)) {
+    return false;
+  }
+  discreteValues.push_back(value);
+
+  return true;
+}
+
+// Sample-aspect-ratio values: positive, below 10 (RFC 6236 "onetonine").
+static bool
+GetSPValue(std::istream& is, float* value, std::string* error)
+{
+  return GetUnsigned<float>(is, 0.1f, 9.9999f, value, error);
+}
+
+// Preference q-values are confined to [0.0, 1.0].
+static bool
+GetQValue(std::istream& is, float* value, std::string* error)
+{
+  return GetUnsigned<float>(is, 0.0f, 1.0f, value, error);
+}
+
+// SRange (sar=) parsing mirrors XYRange, but uses float values and '-' as
+// the range separator instead of ':'.
+bool
+SdpImageattrAttributeList::SRange::ParseDiscreteValues(std::istream& is,
+                                                       std::string* error)
+{
+  do {
+    float value;
+    if (!GetSPValue(is, &value, error)) {
+      return false;
+    }
+    discreteValues.push_back(value);
+  } while (SkipChar(is, ',', error));
+
+  return SkipChar(is, ']', error);
+}
+
+// Called after "[min-"; reads max and the closing bracket.
+bool
+SdpImageattrAttributeList::SRange::ParseAfterMin(std::istream& is,
+                                                 std::string* error)
+{
+  if (!GetSPValue(is, &max, error)) {
+    return false;
+  }
+
+  if (min >= max) {
+    *error = "Min is not smaller than max";
+    return false;
+  }
+
+  return SkipChar(is, ']', error);
+}
+
+bool
+SdpImageattrAttributeList::SRange::ParseAfterBracket(std::istream& is,
+                                                     std::string* error)
+{
+  // Either a range, or a list of discrete values
+  float value;
+  if (!GetSPValue(is, &value, error)) {
+    return false;
+  }
+
+  if (SkipChar(is, '-', error)) {
+    min = value;
+    return ParseAfterMin(is, error);
+  }
+
+  if (SkipChar(is, ',', error)) {
+    discreteValues.push_back(value);
+    return ParseDiscreteValues(is, error);
+  }
+
+  *error = "Expected either \'-\' or \',\'";
+  return false;
+}
+
+bool
+SdpImageattrAttributeList::SRange::Parse(std::istream& is, std::string* error)
+{
+  if (SkipChar(is, '[', error)) {
+    return ParseAfterBracket(is, error);
+  }
+
+  // Single discrete value
+  float value;
+  if (!GetSPValue(is, &value, error)) {
+    return false;
+  }
+  discreteValues.push_back(value);
+  return true;
+}
+
+// PRange (par=) only supports the bracketed "[min-max]" form; no discrete
+// value lists, unlike SRange.
+bool
+SdpImageattrAttributeList::PRange::Parse(std::istream& is, std::string* error)
+{
+  if (!SkipChar(is, '[', error)) {
+    return false;
+  }
+
+  if (!GetSPValue(is, &min, error)) {
+    return false;
+  }
+
+  if (!SkipChar(is, '-', error)) {
+    return false;
+  }
+
+  if (!GetSPValue(is, &max, error)) {
+    return false;
+  }
+
+  if (min >= max) {
+    *error = "min must be smaller than max";
+    return false;
+  }
+
+  if (!SkipChar(is, ']', error)) {
+    return false;
+  }
+  return true;
+}
+
+// Note: the fixed 4-digit precision set here persists on |os| after return.
+void
+SdpImageattrAttributeList::SRange::Serialize(std::ostream& os) const
+{
+  os << std::setprecision(4) << std::fixed;
+  if (discreteValues.size() == 0) {
+    os << "[" << min << "-" << max << "]";
+  } else if (discreteValues.size() == 1) {
+    os << discreteValues.front();
+  } else {
+    os << "[";
+    SkipFirstDelimiter comma(",");
+    for (auto value : discreteValues) {
+      os << comma << value;
+    }
+    os << "]";
+  }
+}
+
+void
+SdpImageattrAttributeList::PRange::Serialize(std::ostream& os) const
+{
+  os << std::setprecision(4) << std::fixed;
+  os << "[" << min << "-" << max << "]";
+}
+
+// Reads "key=" and returns the (lowercased) key; empty string on failure.
+static std::string ParseKey(std::istream& is, std::string* error)
+{
+  std::string token = ParseToken(is, "=", error);
+  if (!SkipChar(is, '=', error)) {
+    return "";
+  }
+  return token;
+}
+
+// Consumes a balanced "[...]" group, tracking nesting depth so nested
+// brackets inside unknown parameters are skipped correctly.
+static bool SkipBraces(std::istream& is, std::string* error)
+{
+  if (PeekChar(is, error) != '[') {
+    *error = "Expected \'[\'";
+    return false;
+  }
+
+  size_t braceCount = 0;
+  do {
+    switch (PeekChar(is, error)) {
+      case '[':
+        ++braceCount;
+        break;
+      case ']':
+        --braceCount;
+        break;
+      default:
+        break;
+    }
+    is.get();
+  } while (braceCount && is);
+
+  if (!is) {
+    *error = "Expected closing brace";
+    return false;
+  }
+
+  return true;
+}
+
+// Assumptions:
+// 1. If the value contains '[' or ']', they are balanced.
+// 2. The value contains no ',' outside of brackets.
+// Used to skip the value of an unrecognized key=value parameter, stopping
+// (without consuming) at the ',' or ']' that terminates it.
+static bool SkipValue(std::istream& is, std::string* error)
+{
+  while (is) {
+    switch (PeekChar(is, error)) {
+      case ',':
+      case ']':
+        return true;
+      case '[':
+        if (!SkipBraces(is, error)) {
+          return false;
+        }
+        break;
+      default:
+        is.get();
+    }
+  }
+
+  *error = "No closing \']\' on set";
+  return false;
+}
+
+// Parses one imageattr set: "[x=<xyrange>,y=<xyrange>(,key=value)*]".
+// x= and y= are mandatory and must come first; sar=, par= and q= are
+// optional and may appear at most once each; unknown keys are skipped.
+bool
+SdpImageattrAttributeList::Set::Parse(std::istream& is, std::string* error)
+{
+  if (!SkipChar(is, '[', error)) {
+    return false;
+  }
+
+  if (ParseKey(is, error) != "x") {
+    *error = "Expected x=";
+    return false;
+  }
+
+  if (!xRange.Parse(is, error)) {
+    return false;
+  }
+
+  if (!SkipChar(is, ',', error)) {
+    return false;
+  }
+
+  if (ParseKey(is, error) != "y") {
+    *error = "Expected y=";
+    return false;
+  }
+
+  if (!yRange.Parse(is, error)) {
+    return false;
+  }
+
+  qValue = 0.5f; // default
+
+  bool gotSar = false;
+  bool gotPar = false;
+  bool gotQ = false;
+
+  while (SkipChar(is, ',', error)) {
+    std::string key = ParseKey(is, error);
+    if (key.empty()) {
+      *error = "Expected key-value";
+      return false;
+    }
+
+    if (key == "sar") {
+      if (gotSar) {
+        *error = "Extra sar parameter";
+        return false;
+      }
+      gotSar = true;
+      if (!sRange.Parse(is, error)) {
+        return false;
+      }
+    } else if (key == "par") {
+      if (gotPar) {
+        *error = "Extra par parameter";
+        return false;
+      }
+      gotPar = true;
+      if (!pRange.Parse(is, error)) {
+        return false;
+      }
+    } else if (key == "q") {
+      if (gotQ) {
+        *error = "Extra q parameter";
+        return false;
+      }
+      gotQ = true;
+      if (!GetQValue(is, &qValue, error)) {
+        return false;
+      }
+    } else {
+      // Unknown parameter: tolerate and skip its value.
+      if (!SkipValue(is, error)) {
+        return false;
+      }
+    }
+  }
+
+  return SkipChar(is, ']', error);
+}
+
+// Inverse of Set::Parse: emits x=/y= always, sar=/par= when set, and q=
+// whenever non-negative (0.5 is the parse-time default, so it serializes).
+void
+SdpImageattrAttributeList::Set::Serialize(std::ostream& os) const
+{
+  os << "[x=";
+  xRange.Serialize(os);
+  os << ",y=";
+  yRange.Serialize(os);
+  if (sRange.IsSet()) {
+    os << ",sar=";
+    sRange.Serialize(os);
+  }
+  if (pRange.IsSet()) {
+    os << ",par=";
+    pRange.Serialize(os);
+  }
+  if (qValue >= 0) {
+    os << std::setprecision(2) << std::fixed << ",q=" << qValue;
+  }
+  os << "]";
+}
+
+// Parses one "send ..."/"recv ..." clause: either '*' (wildcard) or a
+// space-separated list of bracketed sets. Rejects a duplicate clause for
+// the same direction.
+bool
+SdpImageattrAttributeList::Imageattr::ParseSets(std::istream& is,
+                                                std::string* error)
+{
+  std::string type = ParseToken(is, " \t", error);
+
+  bool* isAll = nullptr;
+  std::vector<Set>* sets = nullptr;
+
+  if (type == "send") {
+    isAll = &sendAll;
+    sets = &sendSets;
+  } else if (type == "recv") {
+    isAll = &recvAll;
+    sets = &recvSets;
+  } else {
+    *error = "Unknown type, must be either send or recv";
+    return false;
+  }
+
+  if (*isAll || !sets->empty()) {
+    *error = "Multiple send or recv set lists";
+    return false;
+  }
+
+  is >> std::ws;
+  if (SkipChar(is, '*', error)) {
+    *isAll = true;
+    return true;
+  }
+
+  do {
+    Set set;
+    if (!set.Parse(is, error)) {
+      return false;
+    }
+
+    sets->push_back(set);
+    is >> std::ws;
+  } while (PeekChar(is, error) == '[');
+
+  return true;
+}
+
+// Full attribute: "<pt|*> <send/recv clause> [<send/recv clause>]".
+bool
+SdpImageattrAttributeList::Imageattr::Parse(std::istream& is,
+                                            std::string* error)
+{
+  if (!SkipChar(is, '*', error)) {
+    uint16_t value;
+    if (!GetUnsigned<uint16_t>(is, 0, UINT16_MAX, &value, error)) {
+      return false;
+    }
+    pt = Some(value);
+  }
+
+  is >> std::ws;
+  if (!ParseSets(is, error)) {
+    return false;
+  }
+
+  // There might be a second one
+  is >> std::ws;
+  if (is.eof()) {
+    return true;
+  }
+
+  if (!ParseSets(is, error)) {
+    return false;
+  }
+
+  is >> std::ws;
+  if (!is.eof()) {
+    *error = "Trailing characters";
+    return false;
+  }
+
+  return true;
+}
+
+// Inverse of Imageattr::Parse: payload type (or '*'), then each direction
+// clause that is populated, with '*' for the wildcard form.
+void
+SdpImageattrAttributeList::Imageattr::Serialize(std::ostream& os) const
+{
+  if (pt.isSome()) {
+    os << *pt;
+  } else {
+    os << "*";
+  }
+
+  if (sendAll) {
+    os << " send *";
+  } else if (!sendSets.empty()) {
+    os << " send";
+    for (auto& set : sendSets) {
+      os << " ";
+      set.Serialize(os);
+    }
+  }
+
+  if (recvAll) {
+    os << " recv *";
+  } else if (!recvSets.empty()) {
+    os << " recv";
+    for (auto& set : recvSets) {
+      os << " ";
+      set.Serialize(os);
+    }
+  }
+}
+
+// One "a=imageattr:" line per stored entry.
+void
+SdpImageattrAttributeList::Serialize(std::ostream& os) const
+{
+  for (auto& imageattr : mImageattrs) {
+    os << "a=" << mType << ":";
+    imageattr.Serialize(os);
+    os << CRLF;
+  }
+}
+
+// Parses |raw| (the attribute value, sans "a=imageattr:") and appends it.
+// On failure reports the error message and the stream offset where parsing
+// stopped; the list is left unchanged.
+bool
+SdpImageattrAttributeList::PushEntry(const std::string& raw,
+                                     std::string* error,
+                                     size_t* errorPos)
+{
+  std::istringstream is(raw);
+
+  Imageattr imageattr;
+  if (!imageattr.Parse(is, error)) {
+    // clear() drops fail/eof bits so tellg() returns a valid position.
+    is.clear();
+    *errorPos = is.tellg();
+    return false;
+  }
+
+  mImageattrs.push_back(imageattr);
+  return true;
+}
+
+// a=msid:<identifier> [<appdata>]
+void
+SdpMsidAttributeList::Serialize(std::ostream& os) const
+{
+  for (auto i = mMsids.begin(); i != mMsids.end(); ++i) {
+    os << "a=" << mType << ":" << i->identifier;
+    if (i->appdata.length()) {
+      os << " " << i->appdata;
+    }
+    os << CRLF;
+  }
+}
+
+// a=msid-semantic:<semantic> <msid> <msid> ...
+void
+SdpMsidSemanticAttributeList::Serialize(std::ostream& os) const
+{
+  for (auto i = mMsidSemantics.begin(); i != mMsidSemantics.end(); ++i) {
+    os << "a=" << mType << ":" << i->semantic;
+    for (auto j = i->msids.begin(); j != i->msids.end(); ++j) {
+      os << " " << *j;
+    }
+    os << CRLF;
+  }
+}
+
+// a=remote-candidates:<id> <address> <port> ... ; emits nothing at all when
+// the candidate list is empty (no dangling "a=" line).
+void
+SdpRemoteCandidatesAttribute::Serialize(std::ostream& os) const
+{
+  if (mCandidates.empty()) {
+    return;
+  }
+
+  os << "a=" << mType;
+  for (auto i = mCandidates.begin(); i != mCandidates.end(); i++) {
+    os << (i == mCandidates.begin() ? ":" : " ") << i->id << " " << i->address
+       << " " << i->port;
+  }
+  os << CRLF;
+}
+
+// Parses the optional ';'-separated key=value parameter list of an a=rid
+// attribute (draft-ietf-mmusic-rid). Unknown keys are tolerated and their
+// values skipped.
+bool
+SdpRidAttributeList::Rid::ParseParameters(std::istream& is, std::string* error)
+{
+  if (!PeekChar(is, error)) {
+    // No parameters
+    return true;
+  }
+
+  do {
+    is >> std::ws;
+    std::string key = ParseKey(is, error);
+    if (key.empty()) {
+      return false; // Illegal trailing cruft
+    }
+
+    // This allows pt= to appear anywhere, instead of only at the beginning, but
+    // this ends up being significantly less code.
+    if (key == "pt") {
+      if (!ParseFormats(is, error)) {
+        return false;
+      }
+    } else if (key == "max-width") {
+      if (!GetUnsigned<uint32_t>(
+            is, 0, UINT32_MAX, &constraints.maxWidth, error)) {
+        return false;
+      }
+    } else if (key == "max-height") {
+      if (!GetUnsigned<uint32_t>(
+            is, 0, UINT32_MAX, &constraints.maxHeight, error)) {
+        return false;
+      }
+    } else if (key == "max-fps") {
+      if (!GetUnsigned<uint32_t>(
+            is, 0, UINT32_MAX, &constraints.maxFps, error)) {
+        return false;
+      }
+    } else if (key == "max-fs") {
+      if (!GetUnsigned<uint32_t>(
+            is, 0, UINT32_MAX, &constraints.maxFs, error)) {
+        return false;
+      }
+    } else if (key == "max-br") {
+      if (!GetUnsigned<uint32_t>(
+            is, 0, UINT32_MAX, &constraints.maxBr, error)) {
+        return false;
+      }
+    } else if (key == "max-pps") {
+      if (!GetUnsigned<uint32_t>(
+            is, 0, UINT32_MAX, &constraints.maxPps, error)) {
+        return false;
+      }
+    } else if (key == "depend") {
+      if (!ParseDepend(is, error)) {
+        return false;
+      }
+    } else {
+      // Unknown parameter: discard its value up to the next ';'.
+      (void) ParseToken(is, ";", error);
+    }
+  } while (SkipChar(is, ';', error));
+  return true;
+}
+
+// depend=<rid-id>,<rid-id>,... ; at least one id is required.
+bool
+SdpRidAttributeList::Rid::ParseDepend(
+    std::istream& is,
+    std::string* error)
+{
+  do {
+    std::string id = ParseToken(is, ",;", error);
+    if (id.empty()) {
+      return false;
+    }
+    dependIds.push_back(id);
+  } while(SkipChar(is, ',', error));
+
+  return true;
+}
+
+// pt=<fmt>,<fmt>,... ; payload types are limited to the RTP range 0-127.
+bool
+SdpRidAttributeList::Rid::ParseFormats(
+    std::istream& is,
+    std::string* error)
+{
+  do {
+    uint16_t fmt;
+    if (!GetUnsigned<uint16_t>(is, 0, 127, &fmt, error)) {
+      return false;
+    }
+    formats.push_back(fmt);
+  } while (SkipChar(is, ',', error));
+
+  return true;
+}
+
+// Inverse of ParseParameters: " pt=...;max-width=...;...;depend=...".
+// Constraint fields use 0 as the "unset" sentinel, so only non-zero values
+// are emitted. Emits nothing when no parameter is set.
+void
+SdpRidAttributeList::Rid::SerializeParameters(std::ostream& os) const
+{
+  if (!HasParameters()) {
+    return;
+  }
+
+  os << " ";
+
+  SkipFirstDelimiter semic(";");
+
+  if (!formats.empty()) {
+    os << semic << "pt=";
+    SkipFirstDelimiter comma(",");
+    for (uint16_t fmt : formats) {
+      os << comma << fmt;
+    }
+  }
+
+  if (constraints.maxWidth) {
+    os << semic << "max-width=" << constraints.maxWidth;
+  }
+
+  if (constraints.maxHeight) {
+    os << semic << "max-height=" << constraints.maxHeight;
+  }
+
+  if (constraints.maxFps) {
+    os << semic << "max-fps=" << constraints.maxFps;
+  }
+
+  if (constraints.maxFs) {
+    os << semic << "max-fs=" << constraints.maxFs;
+  }
+
+  if (constraints.maxBr) {
+    os << semic << "max-br=" << constraints.maxBr;
+  }
+
+  if (constraints.maxPps) {
+    os << semic << "max-pps=" << constraints.maxPps;
+  }
+
+  if (!dependIds.empty()) {
+    os << semic << "depend=";
+    SkipFirstDelimiter comma(",");
+    for (const std::string& id : dependIds) {
+      os << comma << id;
+    }
+  }
+}
+
+// a=rid:<id> <send|recv> [parameters]
+bool
+SdpRidAttributeList::Rid::Parse(std::istream& is, std::string* error)
+{
+  id = ParseToken(is, " ", error);
+  if (id.empty()) {
+    return false;
+  }
+
+  is >> std::ws;
+  std::string directionToken = ParseToken(is, " ", error);
+  if (directionToken == "send") {
+    direction = sdp::kSend;
+  } else if (directionToken == "recv") {
+    direction = sdp::kRecv;
+  } else {
+    *error = "Invalid direction, must be either send or recv";
+    return false;
+  }
+
+  return ParseParameters(is, error);
+}
+
+void
+SdpRidAttributeList::Rid::Serialize(std::ostream& os) const
+{
+  os << id << " " << direction;
+  SerializeParameters(os);
+}
+
+// True when this rid applies to |format|; an empty pt= list means the rid
+// constrains all formats, so every valid format matches.
+bool
+SdpRidAttributeList::Rid::HasFormat(const std::string& format) const
+{
+  uint16_t formatAsInt;
+  if (!SdpHelper::GetPtAsInt(format, &formatAsInt)) {
+    return false;
+  }
+
+  if (formats.empty()) {
+    return true;
+  }
+
+  return (std::find(formats.begin(), formats.end(), formatAsInt) !=
+          formats.end());
+}
+
+// One "a=rid:" line per stored entry.
+void
+SdpRidAttributeList::Serialize(std::ostream& os) const
+{
+  for (const Rid& rid : mRids) {
+    os << "a=" << mType << ":";
+    rid.Serialize(os);
+    os << CRLF;
+  }
+}
+
+// Parses |raw| (the attribute value) and appends it; on failure reports the
+// message and the stream offset reached, leaving the list unchanged.
+bool
+SdpRidAttributeList::PushEntry(const std::string& raw,
+                               std::string* error,
+                               size_t* errorPos)
+{
+  std::istringstream is(raw);
+
+  Rid rid;
+  if (!rid.Parse(is, error)) {
+    // clear() drops fail/eof bits so tellg() returns a valid position.
+    is.clear();
+    *errorPos = is.tellg();
+    return false;
+  }
+
+  mRids.push_back(rid);
+  return true;
+}
+
+// a=rtcp:<port> [<nettype> <addrtype> <address>] (RFC 3605); the address
+// triplet is optional and only emitted when an address was provided.
+void
+SdpRtcpAttribute::Serialize(std::ostream& os) const
+{
+  os << "a=" << mType << ":" << mPort;
+  if (!mAddress.empty()) {
+    os << " " << mNetType << " " << mAddrType << " " << mAddress;
+  }
+  os << CRLF;
+}
+
+// Canonical rtcp-fb parameter strings (RFC 4585 / RFC 5104), used as
+// well-known values for the |parameter| field below.
+const char* SdpRtcpFbAttributeList::pli = "pli";
+const char* SdpRtcpFbAttributeList::sli = "sli";
+const char* SdpRtcpFbAttributeList::rpsi = "rpsi";
+const char* SdpRtcpFbAttributeList::app = "app";
+
+const char* SdpRtcpFbAttributeList::fir = "fir";
+const char* SdpRtcpFbAttributeList::tmmbr = "tmmbr";
+const char* SdpRtcpFbAttributeList::tstr = "tstr";
+const char* SdpRtcpFbAttributeList::vbcm = "vbcm";
+
+// a=rtcp-fb:<pt> <type> [<parameter> [<extra>]]; |extra| is only meaningful
+// when a parameter is present.
+void
+SdpRtcpFbAttributeList::Serialize(std::ostream& os) const
+{
+  for (auto i = mFeedbacks.begin(); i != mFeedbacks.end(); ++i) {
+    os << "a=" << mType << ":" << i->pt << " " << i->type;
+    if (i->parameter.length()) {
+      os << " " << i->parameter;
+      if (i->extra.length()) {
+        os << " " << i->extra;
+      }
+    }
+    os << CRLF;
+  }
+}
+
+// Whether the "/channels" suffix belongs on the a=rtpmap line for this
+// codec. Video codecs and mono-implied audio codecs omit it; unknown codecs
+// serialize whatever channel count they carry. The exhaustive switch (no
+// default) makes the compiler flag any newly-added CodecType.
+static bool
+ShouldSerializeChannels(SdpRtpmapAttributeList::CodecType type)
+{
+  switch (type) {
+    case SdpRtpmapAttributeList::kOpus:
+    case SdpRtpmapAttributeList::kG722:
+      return true;
+    case SdpRtpmapAttributeList::kPCMU:
+    case SdpRtpmapAttributeList::kPCMA:
+    case SdpRtpmapAttributeList::kVP8:
+    case SdpRtpmapAttributeList::kVP9:
+    case SdpRtpmapAttributeList::kiLBC:
+    case SdpRtpmapAttributeList::kiSAC:
+    case SdpRtpmapAttributeList::kH264:
+    case SdpRtpmapAttributeList::kRed:
+    case SdpRtpmapAttributeList::kUlpfec:
+    case SdpRtpmapAttributeList::kTelephoneEvent:
+      return false;
+    case SdpRtpmapAttributeList::kOtherCodec:
+      return true;
+  }
+  MOZ_CRASH();
+}
+
+// a=rtpmap:<pt> <name>/<clock>[/<channels>]
+void
+SdpRtpmapAttributeList::Serialize(std::ostream& os) const
+{
+  for (auto i = mRtpmaps.begin(); i != mRtpmaps.end(); ++i) {
+    os << "a=" << mType << ":" << i->pt << " " << i->name << "/" << i->clock;
+    if (i->channels && ShouldSerializeChannels(i->codec)) {
+      os << "/" << i->channels;
+    }
+    os << CRLF;
+  }
+}
+
+// a=sctpmap:<pt> <name> <streams>
+void
+SdpSctpmapAttributeList::Serialize(std::ostream& os) const
+{
+  for (auto i = mSctpmaps.begin(); i != mSctpmaps.end(); ++i) {
+    os << "a=" << mType << ":" << i->pt << " " << i->name << " " << i->streams
+       << CRLF;
+  }
+}
+
+// a=setup:<active|passive|actpass|holdconn> (RFC 4145/5763)
+void
+SdpSetupAttribute::Serialize(std::ostream& os) const
+{
+  os << "a=" << mType << ":" << mRole << CRLF;
+}
+
+// One simulcast version is a comma-separated list of alternative choices.
+void
+SdpSimulcastAttribute::Version::Serialize(std::ostream& os) const
+{
+  SkipFirstDelimiter comma(",");
+  for (const std::string& choice : choices) {
+    os << comma << choice;
+  }
+}
+
+// Parses one comma-separated alternative list; each choice is a token
+// terminated by ',', ';' or space. An empty choice is a parse error.
+bool
+SdpSimulcastAttribute::Version::Parse(std::istream& is, std::string* error)
+{
+  do {
+    std::string value = ParseToken(is, ",; ", error);
+    if (value.empty()) {
+      return false;
+    }
+    choices.push_back(value);
+  } while (SkipChar(is, ',', error));
+
+  return true;
+}
+
+// Converts the string choices to numeric payload types; fails if any choice
+// is not a valid RTP payload type (0-127).
+bool
+SdpSimulcastAttribute::Version::GetChoicesAsFormats(
+    std::vector<uint16_t>* formats) const
+{
+  for (const std::string& choice : choices) {
+    uint16_t format;
+    if (!SdpHelper::GetPtAsInt(choice, &format) || (format > 127)) {
+      return false;
+    }
+    formats->push_back(format);
+  }
+
+  return true;
+}
+
+// Serializes "rid=" or "pt=" followed by ';'-separated versions; versions
+// that are not set are skipped entirely.
+void
+SdpSimulcastAttribute::Versions::Serialize(std::ostream& os) const
+{
+  switch (type) {
+    case kRid:
+      os << "rid=";
+      break;
+    case kPt:
+      os << "pt=";
+      break;
+  }
+
+  SkipFirstDelimiter semic(";");
+  for (const Version& version : *this) {
+    if (!version.IsSet()) {
+      continue;
+    }
+    os << semic;
+    version.Serialize(os);
+  }
+}
+
+// Parses "<rid|pt>=<version>(;<version>)*"; for pt= each choice is
+// additionally validated as a numeric payload type.
+bool
+SdpSimulcastAttribute::Versions::Parse(std::istream& is, std::string* error)
+{
+  std::string rawType = ParseKey(is, error);
+  if (rawType.empty()) {
+    return false;
+  }
+
+  if (rawType == "pt") {
+    type = kPt;
+  } else if (rawType == "rid") {
+    type = kRid;
+  } else {
+    *error = "Unknown simulcast identification type ";
+    error->append(rawType);
+    return false;
+  }
+
+  do {
+    Version version;
+    if (!version.Parse(is, error)) {
+      return false;
+    }
+
+    if (type == kPt) {
+      // Validation only; the formats vector itself is discarded here.
+      std::vector<uint16_t> formats;
+      if (!version.GetChoicesAsFormats(&formats)) {
+        *error = "Invalid payload type";
+        return false;
+      }
+    }
+
+    push_back(version);
+  } while(SkipChar(is, ';', error));
+
+  return true;
+}
+
+// a=simulcast: send <versions> recv <versions> (draft-ietf-mmusic-sdp-
+// simulcast syntax). At least one direction must be set.
+void
+SdpSimulcastAttribute::Serialize(std::ostream& os) const
+{
+  MOZ_ASSERT(sendVersions.IsSet() || recvVersions.IsSet());
+
+  os << "a=" << mType << ":";
+
+  if (sendVersions.IsSet()) {
+    os << " send ";
+    sendVersions.Serialize(os);
+  }
+
+  if (recvVersions.IsSet()) {
+    os << " recv ";
+    recvVersions.Serialize(os);
+  }
+
+  os << CRLF;
+}
+
+// Accepts "send <versions>" and/or "recv <versions>" in either order, at
+// most once each; fails on an empty attribute or an unknown direction token.
+bool
+SdpSimulcastAttribute::Parse(std::istream& is, std::string* error)
+{
+  bool gotRecv = false;
+  bool gotSend = false;
+
+  while (true) {
+    is >> std::ws;
+    std::string token = ParseToken(is, " \t", error);
+    if (token.empty()) {
+      break;
+    }
+
+    if (token == "send") {
+      if (gotSend) {
+        *error = "Already got a send list";
+        return false;
+      }
+      gotSend = true;
+
+      is >> std::ws;
+      if (!sendVersions.Parse(is, error)) {
+        return false;
+      }
+    } else if (token == "recv") {
+      if (gotRecv) {
+        *error = "Already got a recv list";
+        return false;
+      }
+      gotRecv = true;
+
+      is >> std::ws;
+      if (!recvVersions.Parse(is, error)) {
+        return false;
+      }
+    } else {
+      *error = "Type must be either 'send' or 'recv'";
+      return false;
+    }
+  }
+
+  if (!gotSend && !gotRecv) {
+    *error = "Empty simulcast attribute";
+    return false;
+  }
+
+  return true;
+}
+
+// a=ssrc:<ssrc> <attribute>
+void
+SdpSsrcAttributeList::Serialize(std::ostream& os) const
+{
+  for (auto i = mSsrcs.begin(); i != mSsrcs.end(); ++i) {
+    os << "a=" << mType << ":" << i->ssrc << " " << i->attribute << CRLF;
+  }
+}
+
+// a=ssrc-group:<semantics> <ssrc> <ssrc> ...
+void
+SdpSsrcGroupAttributeList::Serialize(std::ostream& os) const
+{
+  for (auto i = mSsrcGroups.begin(); i != mSsrcGroups.end(); ++i) {
+    os << "a=" << mType << ":" << i->semantics;
+    for (auto j = i->ssrcs.begin(); j != i->ssrcs.end(); ++j) {
+      os << " " << (*j);
+    }
+    os << CRLF;
+  }
+}
+
+// Emits one "a=<type>:<value>" line per stored value.
+void
+SdpMultiStringAttribute::Serialize(std::ostream& os) const
+{
+  for (auto i = mValues.begin(); i != mValues.end(); ++i) {
+    os << "a=" << mType << ":" << *i << CRLF;
+  }
+}
+
+// Serializes all values on a single space-separated line
+// (e.g. "a=ice-options:trickle renomination"); nothing when empty.
+void
+SdpOptionsAttribute::Serialize(std::ostream& os) const
+{
+  if (mValues.empty()) {
+    return;
+  }
+
+  os << "a=" << mType << ":";
+
+  for (auto i = mValues.begin(); i != mValues.end(); ++i) {
+    if (i != mValues.begin()) {
+      os << " ";
+    }
+    os << *i;
+  }
+  os << CRLF;
+}
+
+// Splits |value| on single spaces and pushes each token (including a
+// trailing or lone token) as an entry.
+void
+SdpOptionsAttribute::Load(const std::string& value)
+{
+  size_t start = 0;
+  size_t end = value.find(' ');
+  while (end != std::string::npos) {
+    PushEntry(value.substr(start, end));
+    start = end + 1;
+    end = value.find(' ', start);
+  }
+  PushEntry(value.substr(start));
+}
+
+// Flag attributes have no value: "a=<type>".
+void
+SdpFlagAttribute::Serialize(std::ostream& os) const
+{
+  os << "a=" << mType << CRLF;
+}
+
+// "a=<type>:<string value>"
+void
+SdpStringAttribute::Serialize(std::ostream& os) const
+{
+  os << "a=" << mType << ":" << mValue << CRLF;
+}
+
+// "a=<type>:<numeric value>"
+void
+SdpNumberAttribute::Serialize(std::ostream& os) const
+{
+  os << "a=" << mType << ":" << mValue << CRLF;
+}
+
+// Whether |type| may legally appear at the media (m=) level, per the RFCs /
+// drafts that define each attribute. The switch is exhaustive with no
+// default so the compiler forces an update when AttributeType grows;
+// reaching the end means a corrupted enum value.
+bool
+SdpAttribute::IsAllowedAtMediaLevel(AttributeType type)
+{
+  switch (type) {
+    case kBundleOnlyAttribute:
+      return true;
+    case kCandidateAttribute:
+      return true;
+    case kConnectionAttribute:
+      return true;
+    case kDirectionAttribute:
+      return true;
+    case kDtlsMessageAttribute:
+      return false;
+    case kEndOfCandidatesAttribute:
+      return true;
+    case kExtmapAttribute:
+      return true;
+    case kFingerprintAttribute:
+      return true;
+    case kFmtpAttribute:
+      return true;
+    case kGroupAttribute:
+      return false;
+    case kIceLiteAttribute:
+      return false;
+    case kIceMismatchAttribute:
+      return true;
+    // RFC 5245 says this is session-level only, but
+    // draft-ietf-mmusic-ice-sip-sdp-03 updates this to allow at the media
+    // level.
+    case kIceOptionsAttribute:
+      return true;
+    case kIcePwdAttribute:
+      return true;
+    case kIceUfragAttribute:
+      return true;
+    case kIdentityAttribute:
+      return false;
+    case kImageattrAttribute:
+      return true;
+    case kInactiveAttribute:
+      return true;
+    case kLabelAttribute:
+      return true;
+    case kMaxptimeAttribute:
+      return true;
+    case kMidAttribute:
+      return true;
+    case kMsidAttribute:
+      return true;
+    case kMsidSemanticAttribute:
+      return false;
+    case kPtimeAttribute:
+      return true;
+    case kRecvonlyAttribute:
+      return true;
+    case kRemoteCandidatesAttribute:
+      return true;
+    case kRidAttribute:
+      return true;
+    case kRtcpAttribute:
+      return true;
+    case kRtcpFbAttribute:
+      return true;
+    case kRtcpMuxAttribute:
+      return true;
+    case kRtcpRsizeAttribute:
+      return true;
+    case kRtpmapAttribute:
+      return true;
+    case kSctpmapAttribute:
+      return true;
+    case kSendonlyAttribute:
+      return true;
+    case kSendrecvAttribute:
+      return true;
+    case kSetupAttribute:
+      return true;
+    case kSimulcastAttribute:
+      return true;
+    case kSsrcAttribute:
+      return true;
+    case kSsrcGroupAttribute:
+      return true;
+  }
+  MOZ_CRASH("Unknown attribute type");
+}
+
+// Whether |type| may legally appear at the session level; complement of the
+// media-level table above for most (but not all) attributes — e.g.
+// direction and connection attributes are valid at both levels. Same
+// exhaustive-switch convention as IsAllowedAtMediaLevel.
+bool
+SdpAttribute::IsAllowedAtSessionLevel(AttributeType type)
+{
+  switch (type) {
+    case kBundleOnlyAttribute:
+      return false;
+    case kCandidateAttribute:
+      return false;
+    case kConnectionAttribute:
+      return true;
+    case kDirectionAttribute:
+      return true;
+    case kDtlsMessageAttribute:
+      return true;
+    case kEndOfCandidatesAttribute:
+      return true;
+    case kExtmapAttribute:
+      return true;
+    case kFingerprintAttribute:
+      return true;
+    case kFmtpAttribute:
+      return false;
+    case kGroupAttribute:
+      return true;
+    case kIceLiteAttribute:
+      return true;
+    case kIceMismatchAttribute:
+      return false;
+    case kIceOptionsAttribute:
+      return true;
+    case kIcePwdAttribute:
+      return true;
+    case kIceUfragAttribute:
+      return true;
+    case kIdentityAttribute:
+      return true;
+    case kImageattrAttribute:
+      return false;
+    case kInactiveAttribute:
+      return true;
+    case kLabelAttribute:
+      return false;
+    case kMaxptimeAttribute:
+      return false;
+    case kMidAttribute:
+      return false;
+    case kMsidSemanticAttribute:
+      return true;
+    case kMsidAttribute:
+      return false;
+    case kPtimeAttribute:
+      return false;
+    case kRecvonlyAttribute:
+      return true;
+    case kRemoteCandidatesAttribute:
+      return false;
+    case kRidAttribute:
+      return false;
+    case kRtcpAttribute:
+      return false;
+    case kRtcpFbAttribute:
+      return false;
+    case kRtcpMuxAttribute:
+      return false;
+    case kRtcpRsizeAttribute:
+      return false;
+    case kRtpmapAttribute:
+      return false;
+    case kSctpmapAttribute:
+      return false;
+    case kSendonlyAttribute:
+      return true;
+    case kSendrecvAttribute:
+      return true;
+    case kSetupAttribute:
+      return true;
+    case kSimulcastAttribute:
+      return false;
+    case kSsrcAttribute:
+      return false;
+    case kSsrcGroupAttribute:
+      return false;
+  }
+  MOZ_CRASH("Unknown attribute type");
+}
+
+// Canonical wire name for each attribute type (what appears after "a=").
+// kDirectionAttribute deliberately crashes: direction attributes serialize
+// their value (sendrecv/sendonly/...), never a type name. Exhaustive switch,
+// no default, so new enum members cannot be silently forgotten.
+const std::string
+SdpAttribute::GetAttributeTypeString(AttributeType type)
+{
+  switch (type) {
+    case kBundleOnlyAttribute:
+      return "bundle-only";
+    case kCandidateAttribute:
+      return "candidate";
+    case kConnectionAttribute:
+      return "connection";
+    case kDtlsMessageAttribute:
+      return "dtls-message";
+    case kEndOfCandidatesAttribute:
+      return "end-of-candidates";
+    case kExtmapAttribute:
+      return "extmap";
+    case kFingerprintAttribute:
+      return "fingerprint";
+    case kFmtpAttribute:
+      return "fmtp";
+    case kGroupAttribute:
+      return "group";
+    case kIceLiteAttribute:
+      return "ice-lite";
+    case kIceMismatchAttribute:
+      return "ice-mismatch";
+    case kIceOptionsAttribute:
+      return "ice-options";
+    case kIcePwdAttribute:
+      return "ice-pwd";
+    case kIceUfragAttribute:
+      return "ice-ufrag";
+    case kIdentityAttribute:
+      return "identity";
+    case kImageattrAttribute:
+      return "imageattr";
+    case kInactiveAttribute:
+      return "inactive";
+    case kLabelAttribute:
+      return "label";
+    case kMaxptimeAttribute:
+      return "maxptime";
+    case kMidAttribute:
+      return "mid";
+    case kMsidAttribute:
+      return "msid";
+    case kMsidSemanticAttribute:
+      return "msid-semantic";
+    case kPtimeAttribute:
+      return "ptime";
+    case kRecvonlyAttribute:
+      return "recvonly";
+    case kRemoteCandidatesAttribute:
+      return "remote-candidates";
+    case kRidAttribute:
+      return "rid";
+    case kRtcpAttribute:
+      return "rtcp";
+    case kRtcpFbAttribute:
+      return "rtcp-fb";
+    case kRtcpMuxAttribute:
+      return "rtcp-mux";
+    case kRtcpRsizeAttribute:
+      return "rtcp-rsize";
+    case kRtpmapAttribute:
+      return "rtpmap";
+    case kSctpmapAttribute:
+      return "sctpmap";
+    case kSendonlyAttribute:
+      return "sendonly";
+    case kSendrecvAttribute:
+      return "sendrecv";
+    case kSetupAttribute:
+      return "setup";
+    case kSimulcastAttribute:
+      return "simulcast";
+    case kSsrcAttribute:
+      return "ssrc";
+    case kSsrcGroupAttribute:
+      return "ssrc-group";
+    case kDirectionAttribute:
+      MOZ_CRASH("kDirectionAttribute not valid here");
+  }
+  MOZ_CRASH("Unknown attribute type");
+}
+
+} // namespace mozilla
diff --git a/media/webrtc/signaling/src/sdp/SdpAttribute.h b/media/webrtc/signaling/src/sdp/SdpAttribute.h
new file mode 100644
index 000000000..d3cf547ff
--- /dev/null
+++ b/media/webrtc/signaling/src/sdp/SdpAttribute.h
@@ -0,0 +1,1788 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _SDPATTRIBUTE_H_
+#define _SDPATTRIBUTE_H_
+
+#include <algorithm>
+#include <cctype>
+#include <vector>
+#include <ostream>
+#include <sstream>
+#include <cstring>
+#include <iomanip>
+#include <string>
+
+#include "mozilla/UniquePtr.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/Assertions.h"
+#include "mozilla/Maybe.h"
+
+#include "signaling/src/sdp/SdpEnum.h"
+#include "signaling/src/common/EncodingConstraints.h"
+
+namespace mozilla
+{
+
+/**
+ * Base class for SDP attributes
+*/
+class SdpAttribute
+{
+public:
+ enum AttributeType {
+ kFirstAttribute = 0,
+ kBundleOnlyAttribute = 0,
+ kCandidateAttribute,
+ kConnectionAttribute,
+ kDirectionAttribute,
+ kDtlsMessageAttribute,
+ kEndOfCandidatesAttribute,
+ kExtmapAttribute,
+ kFingerprintAttribute,
+ kFmtpAttribute,
+ kGroupAttribute,
+ kIceLiteAttribute,
+ kIceMismatchAttribute,
+ kIceOptionsAttribute,
+ kIcePwdAttribute,
+ kIceUfragAttribute,
+ kIdentityAttribute,
+ kImageattrAttribute,
+ kInactiveAttribute,
+ kLabelAttribute,
+ kMaxptimeAttribute,
+ kMidAttribute,
+ kMsidAttribute,
+ kMsidSemanticAttribute,
+ kPtimeAttribute,
+ kRecvonlyAttribute,
+ kRemoteCandidatesAttribute,
+ kRidAttribute,
+ kRtcpAttribute,
+ kRtcpFbAttribute,
+ kRtcpMuxAttribute,
+ kRtcpRsizeAttribute,
+ kRtpmapAttribute,
+ kSctpmapAttribute,
+ kSendonlyAttribute,
+ kSendrecvAttribute,
+ kSetupAttribute,
+ kSimulcastAttribute,
+ kSsrcAttribute,
+ kSsrcGroupAttribute,
+ kLastAttribute = kSsrcGroupAttribute
+ };
+
+ explicit SdpAttribute(AttributeType type) : mType(type) {}
+ virtual ~SdpAttribute() {}
+
+ AttributeType
+ GetType() const
+ {
+ return mType;
+ }
+
+ virtual void Serialize(std::ostream&) const = 0;
+
+ static bool IsAllowedAtSessionLevel(AttributeType type);
+ static bool IsAllowedAtMediaLevel(AttributeType type);
+ static const std::string GetAttributeTypeString(AttributeType type);
+
+protected:
+ AttributeType mType;
+};
+
+inline std::ostream& operator<<(std::ostream& os, const SdpAttribute& attr)
+{
+ attr.Serialize(os);
+ return os;
+}
+
+inline std::ostream& operator<<(std::ostream& os,
+ const SdpAttribute::AttributeType type)
+{
+ os << SdpAttribute::GetAttributeTypeString(type);
+ return os;
+}
+
+///////////////////////////////////////////////////////////////////////////
+// a=candidate, RFC5245
+//-------------------------------------------------------------------------
+//
+// candidate-attribute = "candidate" ":" foundation SP component-id SP
+// transport SP
+// priority SP
+// connection-address SP ;from RFC 4566
+// port ;port from RFC 4566
+// SP cand-type
+// [SP rel-addr]
+// [SP rel-port]
+// *(SP extension-att-name SP
+// extension-att-value)
+// foundation = 1*32ice-char
+// component-id = 1*5DIGIT
+// transport = "UDP" / transport-extension
+// transport-extension = token ; from RFC 3261
+// priority = 1*10DIGIT
+// cand-type = "typ" SP candidate-types
+// candidate-types = "host" / "srflx" / "prflx" / "relay" / token
+// rel-addr = "raddr" SP connection-address
+// rel-port = "rport" SP port
+// extension-att-name = byte-string ;from RFC 4566
+// extension-att-value = byte-string
+// ice-char = ALPHA / DIGIT / "+" / "/"
+
+// We use a SdpMultiStringAttribute for candidates
+
+///////////////////////////////////////////////////////////////////////////
+// a=connection, RFC4145
+//-------------------------------------------------------------------------
+// connection-attr = "a=connection:" conn-value
+// conn-value = "new" / "existing"
+class SdpConnectionAttribute : public SdpAttribute
+{
+public:
+ enum ConnValue { kNew, kExisting };
+
+ explicit SdpConnectionAttribute(SdpConnectionAttribute::ConnValue value)
+ : SdpAttribute(kConnectionAttribute), mValue(value)
+ {
+ }
+
+ virtual void Serialize(std::ostream& os) const override;
+
+ ConnValue mValue;
+};
+
+inline std::ostream& operator<<(std::ostream& os,
+ SdpConnectionAttribute::ConnValue c)
+{
+ switch (c) {
+ case SdpConnectionAttribute::kNew:
+ os << "new";
+ break;
+ case SdpConnectionAttribute::kExisting:
+ os << "existing";
+ break;
+ default:
+ MOZ_ASSERT(false);
+ os << "?";
+ }
+ return os;
+}
+
+///////////////////////////////////////////////////////////////////////////
+// a=sendrecv / a=sendonly / a=recvonly / a=inactive, RFC 4566
+//-------------------------------------------------------------------------
+class SdpDirectionAttribute : public SdpAttribute
+{
+public:
+ enum Direction {
+ kInactive = 0,
+ kSendonly = sdp::kSend,
+ kRecvonly = sdp::kRecv,
+ kSendrecv = sdp::kSend | sdp::kRecv
+ };
+
+ explicit SdpDirectionAttribute(Direction value)
+ : SdpAttribute(kDirectionAttribute), mValue(value)
+ {
+ }
+
+ virtual void Serialize(std::ostream& os) const override;
+
+ Direction mValue;
+};
+
+inline std::ostream& operator<<(std::ostream& os,
+ SdpDirectionAttribute::Direction d)
+{
+ switch (d) {
+ case SdpDirectionAttribute::kSendonly:
+ os << "sendonly";
+ break;
+ case SdpDirectionAttribute::kRecvonly:
+ os << "recvonly";
+ break;
+ case SdpDirectionAttribute::kSendrecv:
+ os << "sendrecv";
+ break;
+ case SdpDirectionAttribute::kInactive:
+ os << "inactive";
+ break;
+ default:
+ MOZ_ASSERT(false);
+ os << "?";
+ }
+ return os;
+}
+
+///////////////////////////////////////////////////////////////////////////
+// a=dtls-message, draft-rescorla-dtls-in-sdp
+//-------------------------------------------------------------------------
+// attribute =/ dtls-message-attribute
+//
+// dtls-message-attribute = "dtls-message" ":" role SP value
+//
+// role = "client" / "server"
+//
+// value = 1*(ALPHA / DIGIT / "+" / "/" / "=" )
+// ; base64 encoded message
+class SdpDtlsMessageAttribute : public SdpAttribute
+{
+public:
+ enum Role {
+ kClient,
+ kServer
+ };
+
+ explicit SdpDtlsMessageAttribute(Role role, const std::string& value)
+ : SdpAttribute(kDtlsMessageAttribute),
+ mRole(role),
+ mValue(value)
+ {}
+
+ explicit SdpDtlsMessageAttribute(const std::string& unparsed)
+ : SdpAttribute(kDtlsMessageAttribute),
+ mRole(kClient)
+ {
+ std::istringstream is(unparsed);
+ std::string error;
+ // We're not really worried about errors here if we don't parse;
+ // this attribute is a pure optimization.
+ Parse(is, &error);
+ }
+
+ virtual void Serialize(std::ostream& os) const override;
+ bool Parse(std::istream& is, std::string* error);
+
+ Role mRole;
+ std::string mValue;
+};
+
+inline std::ostream& operator<<(std::ostream& os,
+ SdpDtlsMessageAttribute::Role r)
+{
+ switch (r) {
+ case SdpDtlsMessageAttribute::kClient:
+ os << "client";
+ break;
+ case SdpDtlsMessageAttribute::kServer:
+ os << "server";
+ break;
+ default:
+ MOZ_ASSERT(false);
+ os << "?";
+ }
+ return os;
+}
+
+
+///////////////////////////////////////////////////////////////////////////
+// a=extmap, RFC5285
+//-------------------------------------------------------------------------
+// RFC5285
+// extmap = mapentry SP extensionname [SP extensionattributes]
+//
+// extensionname = URI
+//
+// direction = "sendonly" / "recvonly" / "sendrecv" / "inactive"
+//
+// mapentry = "extmap:" 1*5DIGIT ["/" direction]
+//
+// extensionattributes = byte-string
+//
+// URI = <Defined in RFC 3986>
+//
+// byte-string = <Defined in RFC 4566>
+//
+// SP = <Defined in RFC 5234>
+//
+// DIGIT = <Defined in RFC 5234>
+class SdpExtmapAttributeList : public SdpAttribute
+{
+public:
+ SdpExtmapAttributeList() : SdpAttribute(kExtmapAttribute) {}
+
+ struct Extmap {
+ uint16_t entry;
+ SdpDirectionAttribute::Direction direction;
+ bool direction_specified;
+ std::string extensionname;
+ std::string extensionattributes;
+ };
+
+ void
+ PushEntry(uint16_t entry, SdpDirectionAttribute::Direction direction,
+ bool direction_specified, const std::string& extensionname,
+ const std::string& extensionattributes = "")
+ {
+ Extmap value = { entry, direction, direction_specified, extensionname,
+ extensionattributes };
+ mExtmaps.push_back(value);
+ }
+
+ virtual void Serialize(std::ostream& os) const override;
+
+ std::vector<Extmap> mExtmaps;
+};
+
+///////////////////////////////////////////////////////////////////////////
+// a=fingerprint, RFC4572
+//-------------------------------------------------------------------------
+// fingerprint-attribute = "fingerprint" ":" hash-func SP fingerprint
+//
+// hash-func = "sha-1" / "sha-224" / "sha-256" /
+// "sha-384" / "sha-512" /
+// "md5" / "md2" / token
+// ; Additional hash functions can only come
+// ; from updates to RFC 3279
+//
+// fingerprint = 2UHEX *(":" 2UHEX)
+// ; Each byte in upper-case hex, separated
+// ; by colons.
+//
+// UHEX = DIGIT / %x41-46 ; A-F uppercase
+class SdpFingerprintAttributeList : public SdpAttribute
+{
+public:
+ SdpFingerprintAttributeList() : SdpAttribute(kFingerprintAttribute) {}
+
+ enum HashAlgorithm {
+ kSha1,
+ kSha224,
+ kSha256,
+ kSha384,
+ kSha512,
+ kMd5,
+ kMd2,
+ kUnknownAlgorithm
+ };
+
+ struct Fingerprint {
+ HashAlgorithm hashFunc;
+ std::vector<uint8_t> fingerprint;
+ };
+
+ // For use by application programmers. Enforces that it's a known and
+ // non-crazy algorithm.
+ void
+ PushEntry(std::string algorithm_str,
+ const std::vector<uint8_t>& fingerprint,
+ bool enforcePlausible = true)
+ {
+ std::transform(algorithm_str.begin(),
+ algorithm_str.end(),
+ algorithm_str.begin(),
+ ::tolower);
+
+ SdpFingerprintAttributeList::HashAlgorithm algorithm =
+ SdpFingerprintAttributeList::kUnknownAlgorithm;
+
+ if (algorithm_str == "sha-1") {
+ algorithm = SdpFingerprintAttributeList::kSha1;
+ } else if (algorithm_str == "sha-224") {
+ algorithm = SdpFingerprintAttributeList::kSha224;
+ } else if (algorithm_str == "sha-256") {
+ algorithm = SdpFingerprintAttributeList::kSha256;
+ } else if (algorithm_str == "sha-384") {
+ algorithm = SdpFingerprintAttributeList::kSha384;
+ } else if (algorithm_str == "sha-512") {
+ algorithm = SdpFingerprintAttributeList::kSha512;
+ } else if (algorithm_str == "md5") {
+ algorithm = SdpFingerprintAttributeList::kMd5;
+ } else if (algorithm_str == "md2") {
+ algorithm = SdpFingerprintAttributeList::kMd2;
+ }
+
+ if ((algorithm == SdpFingerprintAttributeList::kUnknownAlgorithm) ||
+ fingerprint.empty()) {
+ if (enforcePlausible) {
+ MOZ_ASSERT(false, "Unknown fingerprint algorithm");
+ } else {
+ return;
+ }
+ }
+
+ PushEntry(algorithm, fingerprint);
+ }
+
+ void
+ PushEntry(HashAlgorithm hashFunc, const std::vector<uint8_t>& fingerprint)
+ {
+ Fingerprint value = { hashFunc, fingerprint };
+ mFingerprints.push_back(value);
+ }
+
+ virtual void Serialize(std::ostream& os) const override;
+
+ std::vector<Fingerprint> mFingerprints;
+
+ static std::string FormatFingerprint(const std::vector<uint8_t>& fp);
+ static std::vector<uint8_t> ParseFingerprint(const std::string& str);
+};
+
+inline std::ostream& operator<<(std::ostream& os,
+ SdpFingerprintAttributeList::HashAlgorithm a)
+{
+ switch (a) {
+ case SdpFingerprintAttributeList::kSha1:
+ os << "sha-1";
+ break;
+ case SdpFingerprintAttributeList::kSha224:
+ os << "sha-224";
+ break;
+ case SdpFingerprintAttributeList::kSha256:
+ os << "sha-256";
+ break;
+ case SdpFingerprintAttributeList::kSha384:
+ os << "sha-384";
+ break;
+ case SdpFingerprintAttributeList::kSha512:
+ os << "sha-512";
+ break;
+ case SdpFingerprintAttributeList::kMd5:
+ os << "md5";
+ break;
+ case SdpFingerprintAttributeList::kMd2:
+ os << "md2";
+ break;
+ default:
+ MOZ_ASSERT(false);
+ os << "?";
+ }
+ return os;
+}
+
+///////////////////////////////////////////////////////////////////////////
+// a=group, RFC5888
+//-------------------------------------------------------------------------
+// group-attribute = "a=group:" semantics
+// *(SP identification-tag)
+// semantics = "LS" / "FID" / semantics-extension
+// semantics-extension = token
+// identification-tag = token
+class SdpGroupAttributeList : public SdpAttribute
+{
+public:
+ SdpGroupAttributeList() : SdpAttribute(kGroupAttribute) {}
+
+ enum Semantics {
+ kLs, // RFC5888
+ kFid, // RFC5888
+ kSrf, // RFC3524
+ kAnat, // RFC4091
+ kFec, // RFC5956
+ kFecFr, // RFC5956
+ kCs, // draft-mehta-rmt-flute-sdp-05
+ kDdp, // RFC5583
+ kDup, // RFC7104
+ kBundle // draft-ietf-mmusic-bundle
+ };
+
+ struct Group {
+ Semantics semantics;
+ std::vector<std::string> tags;
+ };
+
+ void
+ PushEntry(Semantics semantics, const std::vector<std::string>& tags)
+ {
+ Group value = { semantics, tags };
+ mGroups.push_back(value);
+ }
+
+ void
+ RemoveMid(const std::string& mid)
+ {
+ for (auto i = mGroups.begin(); i != mGroups.end();) {
+ auto tag = std::find(i->tags.begin(), i->tags.end(), mid);
+ if (tag != i->tags.end()) {
+ i->tags.erase(tag);
+ }
+
+ if (i->tags.empty()) {
+ i = mGroups.erase(i);
+ } else {
+ ++i;
+ }
+ }
+ }
+
+ virtual void Serialize(std::ostream& os) const override;
+
+ std::vector<Group> mGroups;
+};
+
+inline std::ostream& operator<<(std::ostream& os,
+ SdpGroupAttributeList::Semantics s)
+{
+ switch (s) {
+ case SdpGroupAttributeList::kLs:
+ os << "LS";
+ break;
+ case SdpGroupAttributeList::kFid:
+ os << "FID";
+ break;
+ case SdpGroupAttributeList::kSrf:
+ os << "SRF";
+ break;
+ case SdpGroupAttributeList::kAnat:
+ os << "ANAT";
+ break;
+ case SdpGroupAttributeList::kFec:
+ os << "FEC";
+ break;
+ case SdpGroupAttributeList::kFecFr:
+ os << "FEC-FR";
+ break;
+ case SdpGroupAttributeList::kCs:
+ os << "CS";
+ break;
+ case SdpGroupAttributeList::kDdp:
+ os << "DDP";
+ break;
+ case SdpGroupAttributeList::kDup:
+ os << "DUP";
+ break;
+ case SdpGroupAttributeList::kBundle:
+ os << "BUNDLE";
+ break;
+ default:
+ MOZ_ASSERT(false);
+ os << "?";
+ }
+ return os;
+}
+
+///////////////////////////////////////////////////////////////////////////
+// a=identity, draft-ietf-rtcweb-security-arch
+//-------------------------------------------------------------------------
+// identity-attribute = "identity:" identity-assertion
+// [ SP identity-extension
+// *(";" [ SP ] identity-extension) ]
+// identity-assertion = base64
+// base64 = 1*(ALPHA / DIGIT / "+" / "/" / "=" )
+// identity-extension = extension-att-name [ "=" extension-att-value ]
+// extension-att-name = token
+// extension-att-value = 1*(%x01-09 / %x0b-0c / %x0e-3a / %x3c-ff)
+// ; byte-string from [RFC4566] omitting ";"
+
+// We're just using an SdpStringAttribute for this right now
+#if 0
+class SdpIdentityAttribute : public SdpAttribute
+{
+public:
+ explicit SdpIdentityAttribute(const std::string &assertion,
+ const std::vector<std::string> &extensions =
+ std::vector<std::string>()) :
+ SdpAttribute(kIdentityAttribute),
+ mAssertion(assertion),
+ mExtensions(extensions) {}
+
+ virtual void Serialize(std::ostream& os) const override;
+
+ std::string mAssertion;
+ std::vector<std::string> mExtensions;
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////
+// a=imageattr, RFC6236
+//-------------------------------------------------------------------------
+// image-attr = "imageattr:" PT 1*2( 1*WSP ( "send" / "recv" )
+// 1*WSP attr-list )
+// PT = 1*DIGIT / "*"
+// attr-list = ( set *(1*WSP set) ) / "*"
+// ; WSP and DIGIT defined in [RFC5234]
+//
+// set= "[" "x=" xyrange "," "y=" xyrange *( "," key-value ) "]"
+// ; x is the horizontal image size range (pixel count)
+// ; y is the vertical image size range (pixel count)
+//
+// key-value = ( "sar=" srange )
+// / ( "par=" prange )
+// / ( "q=" qvalue )
+// ; Key-value MAY be extended with other keyword
+// ; parameters.
+// ; At most, one instance each of sar, par, or q
+// ; is allowed in a set.
+// ;
+// ; sar (sample aspect ratio) is the sample aspect ratio
+// ; associated with the set (optional, MAY be ignored)
+// ; par (picture aspect ratio) is the allowed
+// ; ratio between the display's x and y physical
+// ; size (optional)
+// ; q (optional, range [0.0..1.0], default value 0.5)
+// ; is the preference for the given set,
+// ; a higher value means a higher preference
+//
+// onetonine = "1" / "2" / "3" / "4" / "5" / "6" / "7" / "8" / "9"
+// ; Digit between 1 and 9
+// xyvalue = onetonine *5DIGIT
+// ; Digit between 1 and 9 that is
+// ; followed by 0 to 5 other digits
+// step = xyvalue
+// xyrange = ( "[" xyvalue ":" [ step ":" ] xyvalue "]" )
+// ; Range between a lower and an upper value
+// ; with an optional step, default step = 1
+// ; The rightmost occurrence of xyvalue MUST have a
+// ; higher value than the leftmost occurrence.
+// / ( "[" xyvalue 1*( "," xyvalue ) "]" )
+// ; Discrete values separated by ','
+// / ( xyvalue )
+// ; A single value
+// spvalue = ( "0" "." onetonine *3DIGIT )
+// ; Values between 0.1000 and 0.9999
+// / ( onetonine "." 1*4DIGIT )
+// ; Values between 1.0000 and 9.9999
+// srange = ( "[" spvalue 1*( "," spvalue ) "]" )
+// ; Discrete values separated by ','.
+// ; Each occurrence of spvalue MUST be
+// ; greater than the previous occurrence.
+// / ( "[" spvalue "-" spvalue "]" )
+// ; Range between a lower and an upper level (inclusive)
+// ; The second occurrence of spvalue MUST have a higher
+// ; value than the first
+// / ( spvalue )
+// ; A single value
+//
+// prange = ( "[" spvalue "-" spvalue "]" )
+// ; Range between a lower and an upper level (inclusive)
+// ; The second occurrence of spvalue MUST have a higher
+// ; value than the first
+//
+// qvalue = ( "0" "." 1*2DIGIT )
+// / ( "1" "." 1*2("0") )
+// ; Values between 0.00 and 1.00
+//
+// XXX TBD -- We don't use this yet, and it's a project unto itself.
+//
+
+class SdpImageattrAttributeList : public SdpAttribute
+{
+public:
+ SdpImageattrAttributeList() : SdpAttribute(kImageattrAttribute) {}
+
+ class XYRange
+ {
+ public:
+ XYRange() : min(0), max(0), step(1) {}
+ void Serialize(std::ostream& os) const;
+ bool Parse(std::istream& is, std::string* error);
+ bool ParseAfterBracket(std::istream& is, std::string* error);
+ bool ParseAfterMin(std::istream& is, std::string* error);
+ bool ParseDiscreteValues(std::istream& is, std::string* error);
+ std::vector<uint32_t> discreteValues;
+ // min/max are used iff discreteValues is empty
+ uint32_t min;
+ uint32_t max;
+ uint32_t step;
+ };
+
+ class SRange
+ {
+ public:
+ SRange() : min(0), max(0) {}
+ void Serialize(std::ostream& os) const;
+ bool Parse(std::istream& is, std::string* error);
+ bool ParseAfterBracket(std::istream& is, std::string* error);
+ bool ParseAfterMin(std::istream& is, std::string* error);
+ bool ParseDiscreteValues(std::istream& is, std::string* error);
+ bool IsSet() const
+ {
+ return !discreteValues.empty() || (min && max);
+ }
+ std::vector<float> discreteValues;
+ // min/max are used iff discreteValues is empty
+ float min;
+ float max;
+ };
+
+ class PRange
+ {
+ public:
+ PRange() : min(0), max(0) {}
+ void Serialize(std::ostream& os) const;
+ bool Parse(std::istream& is, std::string* error);
+ bool IsSet() const
+ {
+ return min && max;
+ }
+ float min;
+ float max;
+ };
+
+ class Set
+ {
+ public:
+ Set() : qValue(-1) {}
+ void Serialize(std::ostream& os) const;
+ bool Parse(std::istream& is, std::string* error);
+ XYRange xRange;
+ XYRange yRange;
+ SRange sRange;
+ PRange pRange;
+ float qValue;
+ };
+
+ class Imageattr
+ {
+ public:
+ Imageattr() : pt(), sendAll(false), recvAll(false) {}
+ void Serialize(std::ostream& os) const;
+ bool Parse(std::istream& is, std::string* error);
+ bool ParseSets(std::istream& is, std::string* error);
+ // If not set, this means all payload types
+ Maybe<uint16_t> pt;
+ bool sendAll;
+ std::vector<Set> sendSets;
+ bool recvAll;
+ std::vector<Set> recvSets;
+ };
+
+ virtual void Serialize(std::ostream& os) const override;
+ bool PushEntry(const std::string& raw, std::string* error, size_t* errorPos);
+
+ std::vector<Imageattr> mImageattrs;
+};
+
+///////////////////////////////////////////////////////////////////////////
+// a=msid, draft-ietf-mmusic-msid
+//-------------------------------------------------------------------------
+// msid-attr = "msid:" identifier [ SP appdata ]
+// identifier = 1*64token-char ; see RFC 4566
+// appdata = 1*64token-char ; see RFC 4566
+class SdpMsidAttributeList : public SdpAttribute
+{
+public:
+ SdpMsidAttributeList() : SdpAttribute(kMsidAttribute) {}
+
+ struct Msid {
+ std::string identifier;
+ std::string appdata;
+ };
+
+ void
+ PushEntry(const std::string& identifier, const std::string& appdata = "")
+ {
+ Msid value = { identifier, appdata };
+ mMsids.push_back(value);
+ }
+
+ virtual void Serialize(std::ostream& os) const override;
+
+ std::vector<Msid> mMsids;
+};
+
+///////////////////////////////////////////////////////////////////////////
+// a=msid-semantic, draft-ietf-mmusic-msid
+//-------------------------------------------------------------------------
+// msid-semantic-attr = "msid-semantic:" msid-semantic msid-list
+// msid-semantic = token ; see RFC 4566
+// msid-list = *(" " msid-id) / " *"
+class SdpMsidSemanticAttributeList : public SdpAttribute
+{
+public:
+ SdpMsidSemanticAttributeList() : SdpAttribute(kMsidSemanticAttribute) {}
+
+ struct MsidSemantic
+ {
+ // TODO: Once we have some more of these, we might want to make an enum
+ std::string semantic;
+ std::vector<std::string> msids;
+ };
+
+ void
+ PushEntry(const std::string& semantic, const std::vector<std::string>& msids)
+ {
+ MsidSemantic value = {semantic, msids};
+ mMsidSemantics.push_back(value);
+ }
+
+ virtual void Serialize(std::ostream& os) const override;
+
+ std::vector<MsidSemantic> mMsidSemantics;
+};
+
+///////////////////////////////////////////////////////////////////////////
+// a=remote-candidate, RFC5245
+//-------------------------------------------------------------------------
+// remote-candidate-att = "remote-candidates" ":" remote-candidate
+// 0*(SP remote-candidate)
+// remote-candidate = component-ID SP connection-address SP port
+class SdpRemoteCandidatesAttribute : public SdpAttribute
+{
+public:
+ struct Candidate {
+ std::string id;
+ std::string address;
+ uint16_t port;
+ };
+
+ explicit SdpRemoteCandidatesAttribute(
+ const std::vector<Candidate>& candidates)
+ : SdpAttribute(kRemoteCandidatesAttribute), mCandidates(candidates)
+ {
+ }
+
+ virtual void Serialize(std::ostream& os) const override;
+
+ std::vector<Candidate> mCandidates;
+};
+
+/*
+a=rid, draft-pthatcher-mmusic-rid-01
+
+ rid-syntax = "a=rid:" rid-identifier SP rid-dir
+ [ rid-pt-param-list / rid-param-list ]
+
+ rid-identifier = 1*(alpha-numeric / "-" / "_")
+
+ rid-dir = "send" / "recv"
+
+ rid-pt-param-list = SP rid-fmt-list *(";" rid-param)
+
+ rid-param-list = SP rid-param *(";" rid-param)
+
+ rid-fmt-list = "pt=" fmt *( "," fmt )
+ ; fmt defined in {{RFC4566}}
+
+ rid-param = rid-width-param
+ / rid-height-param
+ / rid-fps-param
+ / rid-fs-param
+ / rid-br-param
+ / rid-pps-param
+ / rid-depend-param
+ / rid-param-other
+
+ rid-width-param = "max-width" [ "=" int-param-val ]
+
+ rid-height-param = "max-height" [ "=" int-param-val ]
+
+ rid-fps-param = "max-fps" [ "=" int-param-val ]
+
+ rid-fs-param = "max-fs" [ "=" int-param-val ]
+
+ rid-br-param = "max-br" [ "=" int-param-val ]
+
+ rid-pps-param = "max-pps" [ "=" int-param-val ]
+
+ rid-depend-param = "depend=" rid-list
+
+ rid-param-other = 1*(alpha-numeric / "-") [ "=" param-val ]
+
+ rid-list = rid-identifier *( "," rid-identifier )
+
+ int-param-val = 1*DIGIT
+
+ param-val = *( %x20-58 / %x60-7E )
+ ; Any printable character except semicolon
+*/
+class SdpRidAttributeList : public SdpAttribute
+{
+public:
+ explicit SdpRidAttributeList()
+ : SdpAttribute(kRidAttribute)
+ {}
+
+ struct Rid
+ {
+ Rid() :
+ direction(sdp::kSend)
+ {}
+
+ bool Parse(std::istream& is, std::string* error);
+ bool ParseParameters(std::istream& is, std::string* error);
+ bool ParseDepend(std::istream& is, std::string* error);
+ bool ParseFormats(std::istream& is, std::string* error);
+ void Serialize(std::ostream& os) const;
+ void SerializeParameters(std::ostream& os) const;
+ bool HasFormat(const std::string& format) const;
+ bool HasParameters() const
+ {
+ return !formats.empty() ||
+ constraints.maxWidth ||
+ constraints.maxHeight ||
+ constraints.maxFps ||
+ constraints.maxFs ||
+ constraints.maxBr ||
+ constraints.maxPps ||
+ !dependIds.empty();
+ }
+
+
+ std::string id;
+ sdp::Direction direction;
+ std::vector<uint16_t> formats; // Empty implies all
+ EncodingConstraints constraints;
+ std::vector<std::string> dependIds;
+ };
+
+ virtual void Serialize(std::ostream& os) const override;
+ bool PushEntry(const std::string& raw, std::string* error, size_t* errorPos);
+
+ std::vector<Rid> mRids;
+};
+
+///////////////////////////////////////////////////////////////////////////
+// a=rtcp, RFC3605
+//-------------------------------------------------------------------------
+// rtcp-attribute = "a=rtcp:" port [nettype space addrtype space
+// connection-address] CRLF
+class SdpRtcpAttribute : public SdpAttribute
+{
+public:
+ explicit SdpRtcpAttribute(uint16_t port)
+ : SdpAttribute(kRtcpAttribute),
+ mPort(port),
+ mNetType(sdp::kNetTypeNone),
+ mAddrType(sdp::kAddrTypeNone)
+ {}
+
+ SdpRtcpAttribute(uint16_t port,
+ sdp::NetType netType,
+ sdp::AddrType addrType,
+ const std::string& address)
+ : SdpAttribute(kRtcpAttribute),
+ mPort(port),
+ mNetType(netType),
+ mAddrType(addrType),
+ mAddress(address)
+ {
+ MOZ_ASSERT(netType != sdp::kNetTypeNone);
+ MOZ_ASSERT(addrType != sdp::kAddrTypeNone);
+ MOZ_ASSERT(!address.empty());
+ }
+
+ virtual void Serialize(std::ostream& os) const override;
+
+ uint16_t mPort;
+ sdp::NetType mNetType;
+ sdp::AddrType mAddrType;
+ std::string mAddress;
+};
+
+///////////////////////////////////////////////////////////////////////////
+// a=rtcp-fb, RFC4585
+//-------------------------------------------------------------------------
+// rtcp-fb-syntax = "a=rtcp-fb:" rtcp-fb-pt SP rtcp-fb-val CRLF
+//
+// rtcp-fb-pt = "*" ; wildcard: applies to all formats
+// / fmt ; as defined in SDP spec
+//
+// rtcp-fb-val = "ack" rtcp-fb-ack-param
+// / "nack" rtcp-fb-nack-param
+// / "trr-int" SP 1*DIGIT
+// / rtcp-fb-id rtcp-fb-param
+//
+// rtcp-fb-id = 1*(alpha-numeric / "-" / "_")
+//
+// rtcp-fb-param = SP "app" [SP byte-string]
+// / SP token [SP byte-string]
+// / ; empty
+//
+// rtcp-fb-ack-param = SP "rpsi"
+// / SP "app" [SP byte-string]
+// / SP token [SP byte-string]
+// / ; empty
+//
+// rtcp-fb-nack-param = SP "pli"
+// / SP "sli"
+// / SP "rpsi"
+// / SP "app" [SP byte-string]
+// / SP token [SP byte-string]
+// / ; empty
+//
+class SdpRtcpFbAttributeList : public SdpAttribute
+{
+public:
+ SdpRtcpFbAttributeList() : SdpAttribute(kRtcpFbAttribute) {}
+
+ enum Type { kAck, kApp, kCcm, kNack, kTrrInt, kRemb };
+
+ static const char* pli;
+ static const char* sli;
+ static const char* rpsi;
+ static const char* app;
+
+ static const char* fir;
+ static const char* tmmbr;
+ static const char* tstr;
+ static const char* vbcm;
+
+ struct Feedback {
+ std::string pt;
+ Type type;
+ std::string parameter;
+ std::string extra;
+ };
+
+ void
+ PushEntry(const std::string& pt, Type type, const std::string& parameter = "",
+ const std::string& extra = "")
+ {
+ Feedback value = { pt, type, parameter, extra };
+ mFeedbacks.push_back(value);
+ }
+
+ virtual void Serialize(std::ostream& os) const override;
+
+ std::vector<Feedback> mFeedbacks;
+};
+
+inline std::ostream& operator<<(std::ostream& os,
+ SdpRtcpFbAttributeList::Type type)
+{
+ switch (type) {
+ case SdpRtcpFbAttributeList::kAck:
+ os << "ack";
+ break;
+ case SdpRtcpFbAttributeList::kApp:
+ os << "app";
+ break;
+ case SdpRtcpFbAttributeList::kCcm:
+ os << "ccm";
+ break;
+ case SdpRtcpFbAttributeList::kNack:
+ os << "nack";
+ break;
+ case SdpRtcpFbAttributeList::kTrrInt:
+ os << "trr-int";
+ break;
+ case SdpRtcpFbAttributeList::kRemb:
+ os << "goog-remb";
+ break;
+ default:
+ MOZ_ASSERT(false);
+ os << "?";
+ }
+ return os;
+}
+
+///////////////////////////////////////////////////////////////////////////
+// a=rtpmap, RFC4566
+//-------------------------------------------------------------------------
+// a=rtpmap:<payload type> <encoding name>/<clock rate> [/<encoding parameters>]
+class SdpRtpmapAttributeList : public SdpAttribute
+{
+public:
+ SdpRtpmapAttributeList() : SdpAttribute(kRtpmapAttribute) {}
+
+ // Minimal set to get going
+ enum CodecType {
+ kOpus,
+ kG722,
+ kPCMU,
+ kPCMA,
+ kVP8,
+ kVP9,
+ kiLBC,
+ kiSAC,
+ kH264,
+ kRed,
+ kUlpfec,
+ kTelephoneEvent,
+ kOtherCodec
+ };
+
+ struct Rtpmap {
+ std::string pt;
+ CodecType codec;
+ std::string name;
+ uint32_t clock;
+ // Technically, this could mean something else in the future.
+ // In practice, that's probably not going to happen.
+ uint32_t channels;
+ };
+
+ void
+ PushEntry(const std::string& pt, CodecType codec, const std::string& name,
+ uint32_t clock, uint32_t channels = 0)
+ {
+ Rtpmap value = { pt, codec, name, clock, channels };
+ mRtpmaps.push_back(value);
+ }
+
+ virtual void Serialize(std::ostream& os) const override;
+
+ bool
+ HasEntry(const std::string& pt) const
+ {
+ for (auto it = mRtpmaps.begin(); it != mRtpmaps.end(); ++it) {
+ if (it->pt == pt) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ const Rtpmap&
+ GetEntry(const std::string& pt) const
+ {
+ for (auto it = mRtpmaps.begin(); it != mRtpmaps.end(); ++it) {
+ if (it->pt == pt) {
+ return *it;
+ }
+ }
+ MOZ_CRASH();
+ }
+
+ std::vector<Rtpmap> mRtpmaps;
+};
+
+inline std::ostream& operator<<(std::ostream& os,
+ SdpRtpmapAttributeList::CodecType c)
+{
+ switch (c) {
+ case SdpRtpmapAttributeList::kOpus:
+ os << "opus";
+ break;
+ case SdpRtpmapAttributeList::kG722:
+ os << "G722";
+ break;
+ case SdpRtpmapAttributeList::kPCMU:
+ os << "PCMU";
+ break;
+ case SdpRtpmapAttributeList::kPCMA:
+ os << "PCMA";
+ break;
+ case SdpRtpmapAttributeList::kVP8:
+ os << "VP8";
+ break;
+ case SdpRtpmapAttributeList::kVP9:
+ os << "VP9";
+ break;
+ case SdpRtpmapAttributeList::kiLBC:
+ os << "iLBC";
+ break;
+ case SdpRtpmapAttributeList::kiSAC:
+ os << "iSAC";
+ break;
+ case SdpRtpmapAttributeList::kH264:
+ os << "H264";
+ break;
+ case SdpRtpmapAttributeList::kRed:
+ os << "red";
+ break;
+ case SdpRtpmapAttributeList::kUlpfec:
+ os << "ulpfec";
+ break;
+ case SdpRtpmapAttributeList::kTelephoneEvent:
+ os << "telephone-event";
+ break;
+ default:
+ MOZ_ASSERT(false);
+ os << "?";
+ }
+ return os;
+}
+
+///////////////////////////////////////////////////////////////////////////
+// a=fmtp, RFC4566, RFC5576
+//-------------------------------------------------------------------------
+// a=fmtp:<format> <format specific parameters>
+//
+// Represents all a=fmtp lines in an SDP section; each entry pairs a format
+// (payload type string) with an optional typed parameter object.
+class SdpFmtpAttributeList : public SdpAttribute
+{
+public:
+  SdpFmtpAttributeList() : SdpAttribute(kFmtpAttribute) {}
+
+  // Base class for format parameters
+  class Parameters
+  {
+  public:
+    explicit Parameters(SdpRtpmapAttributeList::CodecType aCodec)
+      : codec_type(aCodec)
+    {
+    }
+
+    virtual ~Parameters() {}
+    // Deep-copy; returns a heap-allocated copy of the concrete subclass.
+    virtual Parameters* Clone() const = 0;
+    // Writes the "<format specific parameters>" portion of the a=fmtp line.
+    virtual void Serialize(std::ostream& os) const = 0;
+
+    // Identifies which concrete subclass this is (eg; kH264 -> H264Parameters).
+    SdpRtpmapAttributeList::CodecType codec_type;
+  };
+
+  // Parameters for RED; serialized as a '/'-separated list of the
+  // redundant encodings' payload types.
+  class RedParameters : public Parameters
+  {
+  public:
+    RedParameters()
+      : Parameters(SdpRtpmapAttributeList::kRed)
+    {
+    }
+
+    virtual Parameters*
+    Clone() const override
+    {
+      return new RedParameters(*this);
+    }
+
+    virtual void
+    Serialize(std::ostream& os) const override
+    {
+      for(size_t i = 0; i < encodings.size(); ++i) {
+        os << (i != 0 ? "/" : "")
+           << std::to_string(encodings[i]);
+      }
+    }
+
+    // Payload types of the redundant encodings, in order.
+    std::vector<uint8_t> encodings;
+  };
+
+  // Parameters for H.264; zero-valued optional fields are omitted from
+  // serialization.
+  class H264Parameters : public Parameters
+  {
+  public:
+    static const uint32_t kDefaultProfileLevelId = 0x420010;
+
+    H264Parameters()
+      : Parameters(SdpRtpmapAttributeList::kH264),
+        packetization_mode(0),
+        level_asymmetry_allowed(false),
+        profile_level_id(kDefaultProfileLevelId),
+        max_mbps(0),
+        max_fs(0),
+        max_cpb(0),
+        max_dpb(0),
+        max_br(0)
+    {
+      memset(sprop_parameter_sets, 0, sizeof(sprop_parameter_sets));
+    }
+
+    virtual Parameters*
+    Clone() const override
+    {
+      return new H264Parameters(*this);
+    }
+
+    virtual void
+    Serialize(std::ostream& os) const override
+    {
+      // Note: don't move this, since having an unconditional param up top
+      // lets us avoid a whole bunch of conditional streaming of ';' below
+      // profile-level-id is always printed as exactly six hex digits;
+      // stream flags are restored (dec, fill ' ') afterwards.
+      os << "profile-level-id=" << std::hex << std::setfill('0') << std::setw(6)
+         << profile_level_id << std::dec << std::setfill(' ');
+
+      os << ";level-asymmetry-allowed=" << (level_asymmetry_allowed ? 1 : 0);
+
+      if (strlen(sprop_parameter_sets)) {
+        os << ";sprop-parameter-sets=" << sprop_parameter_sets;
+      }
+
+      if (packetization_mode != 0) {
+        os << ";packetization-mode=" << packetization_mode;
+      }
+
+      if (max_mbps != 0) {
+        os << ";max-mbps=" << max_mbps;
+      }
+
+      if (max_fs != 0) {
+        os << ";max-fs=" << max_fs;
+      }
+
+      if (max_cpb != 0) {
+        os << ";max-cpb=" << max_cpb;
+      }
+
+      if (max_dpb != 0) {
+        os << ";max-dpb=" << max_dpb;
+      }
+
+      if (max_br != 0) {
+        os << ";max-br=" << max_br;
+      }
+    }
+
+    // Fixed-size, NUL-terminated buffer; zeroed in the constructor and
+    // only serialized when non-empty.
+    static const size_t max_sprop_len = 128;
+    char sprop_parameter_sets[max_sprop_len];
+    unsigned int packetization_mode;
+    bool level_asymmetry_allowed;
+    unsigned int profile_level_id;
+    unsigned int max_mbps;
+    unsigned int max_fs;
+    unsigned int max_cpb;
+    unsigned int max_dpb;
+    unsigned int max_br;
+  };
+
+  // Also used for VP9 since they share parameters
+  class VP8Parameters : public Parameters
+  {
+  public:
+    // |type| is kVp8 or kVp9, selecting which codec these apply to.
+    explicit VP8Parameters(SdpRtpmapAttributeList::CodecType type)
+      : Parameters(type), max_fs(0), max_fr(0)
+    {
+    }
+
+    virtual Parameters*
+    Clone() const override
+    {
+      return new VP8Parameters(*this);
+    }
+
+    virtual void
+    Serialize(std::ostream& os) const override
+    {
+      // draft-ietf-payload-vp8-11 says these are mandatory, upper layer
+      // needs to ensure they're set properly.
+      os << "max-fs=" << max_fs;
+      os << ";max-fr=" << max_fr;
+    }
+
+    unsigned int max_fs;
+    unsigned int max_fr;
+  };
+
+  // Parameters for Opus; all three fields are always serialized.
+  class OpusParameters : public Parameters
+  {
+  public:
+    enum { kDefaultMaxPlaybackRate = 48000,
+           kDefaultStereo = 0,
+           kDefaultUseInBandFec = 0 };
+    OpusParameters() :
+      Parameters(SdpRtpmapAttributeList::kOpus),
+      maxplaybackrate(kDefaultMaxPlaybackRate),
+      stereo(kDefaultStereo),
+      useInBandFec(kDefaultUseInBandFec)
+    {}
+
+    Parameters*
+    Clone() const override
+    {
+      return new OpusParameters(*this);
+    }
+
+    void
+    Serialize(std::ostream& os) const override
+    {
+      os << "maxplaybackrate=" << maxplaybackrate
+         << ";stereo=" << stereo
+         << ";useinbandfec=" << useInBandFec;
+    }
+
+    unsigned int maxplaybackrate;
+    unsigned int stereo;
+    unsigned int useInBandFec;
+  };
+
+  // Parameters for telephone-event; the value is the supported DTMF tone
+  // range, defaulting to "0-15".
+  class TelephoneEventParameters : public Parameters
+  {
+  public:
+    TelephoneEventParameters() :
+      Parameters(SdpRtpmapAttributeList::kTelephoneEvent),
+      dtmfTones("0-15")
+    {}
+
+    virtual Parameters*
+    Clone() const override
+    {
+      return new TelephoneEventParameters(*this);
+    }
+
+    void
+    Serialize(std::ostream& os) const override
+    {
+      os << dtmfTones;
+    }
+
+    std::string dtmfTones;
+  };
+
+  // One a=fmtp entry: a format (payload type string) plus its parsed
+  // parameters (may be null; see the contract comment below). Copy
+  // construction/assignment deep-copy |parameters| via Clone().
+  class Fmtp
+  {
+  public:
+    Fmtp(const std::string& aFormat, UniquePtr<Parameters> aParameters)
+        : format(aFormat),
+          parameters(Move(aParameters))
+    {
+    }
+
+    Fmtp(const std::string& aFormat, const Parameters& aParameters)
+        : format(aFormat),
+          parameters(aParameters.Clone())
+    {
+    }
+
+    // TODO: Rip all of this out when we have move semantics in the stl.
+    Fmtp(const Fmtp& orig) { *this = orig; }
+
+    Fmtp& operator=(const Fmtp& rhs)
+    {
+      if (this != &rhs) {
+        format = rhs.format;
+        parameters.reset(rhs.parameters ? rhs.parameters->Clone() : nullptr);
+      }
+      return *this;
+    }
+
+    // The contract around these is as follows:
+    // * |parameters| is only set if we recognized the media type and had
+    //   a subclass of Parameters to represent that type of parameters
+    // * |parameters| is a best-effort representation; it might be missing
+    //   stuff
+    // * Parameters::codec_type tells you the concrete class, eg
+    //   kH264 -> H264Parameters
+    std::string format;
+    UniquePtr<Parameters> parameters;
+  };
+
+  virtual void Serialize(std::ostream& os) const override;
+
+  // Appends an entry, taking ownership of |parameters|.
+  void
+  PushEntry(const std::string& format, UniquePtr<Parameters> parameters)
+  {
+    mFmtps.push_back(Fmtp(format, Move(parameters)));
+  }
+
+  std::vector<Fmtp> mFmtps;
+};
+
+///////////////////////////////////////////////////////////////////////////
+// a=sctpmap, draft-ietf-mmusic-sctp-sdp-05
+//-------------------------------------------------------------------------
+// sctpmap-attr = "a=sctpmap:" sctpmap-number media-subtypes
+// [streams]
+// sctpmap-number = 1*DIGIT
+// protocol = labelstring
+// labelstring = text
+// text = byte-string
+// streams = 1*DIGIT
+//
+// We're going to pretend that there are spaces where they make sense.
+//
+// (draft-06 is not backward compatabile and draft-07 replaced sctpmap's with
+// fmtp maps - we should carefully choose when to upgrade)
+// Represents all a=sctpmap lines in an SDP section.
+class SdpSctpmapAttributeList : public SdpAttribute
+{
+public:
+  SdpSctpmapAttributeList() : SdpAttribute(kSctpmapAttribute) {}
+
+  // One sctpmap entry: sctpmap-number (pt), protocol name, stream count.
+  struct Sctpmap {
+    std::string pt;
+    std::string name;
+    uint32_t streams;
+  };
+
+  void
+  PushEntry(const std::string& pt, const std::string& name,
+            uint32_t streams = 0)
+  {
+    Sctpmap value = { pt, name, streams };
+    mSctpmaps.push_back(value);
+  }
+
+  virtual void Serialize(std::ostream& os) const override;
+
+  // Linear search for an entry with the given sctpmap-number.
+  bool
+  HasEntry(const std::string& pt) const
+  {
+    for (auto it = mSctpmaps.begin(); it != mSctpmaps.end(); ++it) {
+      if (it->pt == pt) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  // Precondition: an entry for |pt| exists (check HasEntry first);
+  // MOZ_CRASHes if it does not.
+  const Sctpmap&
+  GetEntry(const std::string& pt) const
+  {
+    for (auto it = mSctpmaps.begin(); it != mSctpmaps.end(); ++it) {
+      if (it->pt == pt) {
+        return *it;
+      }
+    }
+    MOZ_CRASH();
+  }
+
+  std::vector<Sctpmap> mSctpmaps;
+};
+
+///////////////////////////////////////////////////////////////////////////
+// a=setup, RFC4145
+//-------------------------------------------------------------------------
+// setup-attr = "a=setup:" role
+// role = "active" / "passive" / "actpass" / "holdconn"
+// Represents an a=setup line (DTLS connection role).
+class SdpSetupAttribute : public SdpAttribute
+{
+public:
+  enum Role { kActive, kPassive, kActpass, kHoldconn };
+
+  explicit SdpSetupAttribute(Role role)
+      : SdpAttribute(kSetupAttribute), mRole(role)
+  {
+  }
+
+  virtual void Serialize(std::ostream& os) const override;
+
+  Role mRole;
+};
+
+// Streams the RFC4145 token for a setup role; asserts (and emits "?")
+// on an unknown value.
+inline std::ostream& operator<<(std::ostream& os, SdpSetupAttribute::Role r)
+{
+  switch (r) {
+    case SdpSetupAttribute::kActive:
+      os << "active";
+      break;
+    case SdpSetupAttribute::kPassive:
+      os << "passive";
+      break;
+    case SdpSetupAttribute::kActpass:
+      os << "actpass";
+      break;
+    case SdpSetupAttribute::kHoldconn:
+      os << "holdconn";
+      break;
+    default:
+      MOZ_ASSERT(false);
+      os << "?";
+  }
+  return os;
+}
+
+// sc-attr = "a=simulcast:" 1*2( WSP sc-str-list ) [WSP sc-pause-list]
+// sc-str-list = sc-dir WSP sc-id-type "=" sc-alt-list *( ";" sc-alt-list )
+// sc-pause-list = "paused=" sc-alt-list
+// sc-dir = "send" / "recv"
+// sc-id-type = "pt" / "rid" / token
+// sc-alt-list = sc-id *( "," sc-id )
+// sc-id = fmt / rid-identifier / token
+// ; WSP defined in [RFC5234]
+// ; fmt, token defined in [RFC4566]
+// ; rid-identifier defined in [I-D.pthatcher-mmusic-rid]
+// Represents an a=simulcast line (see grammar above).
+class SdpSimulcastAttribute : public SdpAttribute
+{
+public:
+  SdpSimulcastAttribute() : SdpAttribute(kSimulcastAttribute) {}
+
+  void Serialize(std::ostream& os) const override;
+  // Parses from |is|; on failure returns false and sets *error.
+  bool Parse(std::istream& is, std::string* error);
+
+  // One sc-alt-list: the alternative ids for a single simulcast version.
+  class Version
+  {
+  public:
+    void Serialize(std::ostream& os) const;
+    // A Version is "set" once it has at least one choice.
+    bool IsSet() const
+    {
+      return !choices.empty();
+    }
+    bool Parse(std::istream& is, std::string* error);
+    // Converts choices to numeric formats; see impl for failure semantics.
+    bool GetChoicesAsFormats(std::vector<uint16_t>* formats) const;
+
+    std::vector<std::string> choices;
+  };
+
+  // A direction's version list plus the id type (pt or rid) its entries use.
+  class Versions : public std::vector<Version>
+  {
+  public:
+    enum Type {
+      kPt,
+      kRid
+    };
+
+    // Defaults to rid-based identification.
+    Versions() : type(kRid) {}
+    void Serialize(std::ostream& os) const;
+    // Set iff at least one contained Version is set.
+    bool IsSet() const
+    {
+      if (empty()) {
+        return false;
+      }
+
+      for (const Version& version : *this) {
+        if (version.IsSet()) {
+          return true;
+        }
+      }
+
+      return false;
+    }
+
+    bool Parse(std::istream& is, std::string* error);
+    Type type;
+  };
+
+  Versions sendVersions;
+  Versions recvVersions;
+};
+
+///////////////////////////////////////////////////////////////////////////
+// a=ssrc, RFC5576
+//-------------------------------------------------------------------------
+// ssrc-attr = "ssrc:" ssrc-id SP attribute
+// ; The base definition of "attribute" is in RFC 4566.
+// ; (It is the content of "a=" lines.)
+//
+// ssrc-id = integer ; 0 .. 2**32 - 1
+//-------------------------------------------------------------------------
+// TODO -- In the future, it might be nice if we ran a parse on the
+// attribute section of this so that we could interpret it semantically.
+// For WebRTC, the key use case for a=ssrc is assocaiting SSRCs with
+// media sections, and we're not really going to care about the attribute
+// itself. So we're just going to store it as a string for the time being.
+// Issue 187.
+// Represents all a=ssrc lines in an SDP section. Per the TODO above, the
+// attribute portion is stored as an uninterpreted string.
+class SdpSsrcAttributeList : public SdpAttribute
+{
+public:
+  SdpSsrcAttributeList() : SdpAttribute(kSsrcAttribute) {}
+
+  struct Ssrc {
+    uint32_t ssrc;
+    std::string attribute;
+  };
+
+  void
+  PushEntry(uint32_t ssrc, const std::string& attribute)
+  {
+    Ssrc value = { ssrc, attribute };
+    mSsrcs.push_back(value);
+  }
+
+  virtual void Serialize(std::ostream& os) const override;
+
+  std::vector<Ssrc> mSsrcs;
+};
+
+///////////////////////////////////////////////////////////////////////////
+// a=ssrc-group, RFC5576
+//-------------------------------------------------------------------------
+// ssrc-group-attr = "ssrc-group:" semantics *(SP ssrc-id)
+//
+// semantics = "FEC" / "FID" / token
+//
+// ssrc-id = integer ; 0 .. 2**32 - 1
+// Represents all a=ssrc-group lines in an SDP section.
+class SdpSsrcGroupAttributeList : public SdpAttribute
+{
+public:
+  enum Semantics {
+    kFec,   // RFC5576
+    kFid,   // RFC5576
+    kFecFr, // RFC5956
+    kDup    // RFC7104
+  };
+
+  // One group: its semantics plus the member SSRC ids.
+  struct SsrcGroup {
+    Semantics semantics;
+    std::vector<uint32_t> ssrcs;
+  };
+
+  SdpSsrcGroupAttributeList() : SdpAttribute(kSsrcGroupAttribute) {}
+
+  void
+  PushEntry(Semantics semantics, const std::vector<uint32_t>& ssrcs)
+  {
+    SsrcGroup value = { semantics, ssrcs };
+    mSsrcGroups.push_back(value);
+  }
+
+  virtual void Serialize(std::ostream& os) const override;
+
+  std::vector<SsrcGroup> mSsrcGroups;
+};
+
+// Streams the token for an ssrc-group semantics value; asserts (and emits
+// "?") on an unknown value.
+inline std::ostream& operator<<(std::ostream& os,
+                                SdpSsrcGroupAttributeList::Semantics s)
+{
+  switch (s) {
+    case SdpSsrcGroupAttributeList::kFec:
+      os << "FEC";
+      break;
+    case SdpSsrcGroupAttributeList::kFid:
+      os << "FID";
+      break;
+    case SdpSsrcGroupAttributeList::kFecFr:
+      os << "FEC-FR";
+      break;
+    case SdpSsrcGroupAttributeList::kDup:
+      os << "DUP";
+      break;
+    default:
+      MOZ_ASSERT(false);
+      os << "?";
+  }
+  return os;
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Generic attribute that may appear multiple times; each value is
+// serialized on its own a= line.
+class SdpMultiStringAttribute : public SdpAttribute
+{
+public:
+  explicit SdpMultiStringAttribute(AttributeType type) : SdpAttribute(type) {}
+
+  void
+  PushEntry(const std::string& entry)
+  {
+    mValues.push_back(entry);
+  }
+
+  // NOTE(review): missing 'override' here, unlike sibling attributes.
+  virtual void Serialize(std::ostream& os) const;
+
+  std::vector<std::string> mValues;
+};
+
+// otherwise identical to SdpMultiStringAttribute, this is used for
+// ice-options and other places where the value is serialized onto
+// a single line with space separating tokens
+// See the comment above: like SdpMultiStringAttribute, but all values go
+// on a single line, space-separated (eg; a=ice-options).
+class SdpOptionsAttribute : public SdpAttribute
+{
+public:
+  explicit SdpOptionsAttribute(AttributeType type) : SdpAttribute(type) {}
+
+  void
+  PushEntry(const std::string& entry)
+  {
+    mValues.push_back(entry);
+  }
+
+  // Splits |value| into tokens and appends them.
+  void Load(const std::string& value);
+
+  // NOTE(review): missing 'override' here, unlike sibling attributes.
+  virtual void Serialize(std::ostream& os) const;
+
+  std::vector<std::string> mValues;
+};
+
+// Used for attributes that take no value (eg; a=ice-lite)
+// Used for attributes that take no value (eg; a=ice-lite); presence alone
+// is the information.
+class SdpFlagAttribute : public SdpAttribute
+{
+public:
+  explicit SdpFlagAttribute(AttributeType type) : SdpAttribute(type) {}
+
+  virtual void Serialize(std::ostream& os) const override;
+};
+
+// Used for any other kind of single-valued attribute not otherwise specialized
+// Used for any other kind of single-valued attribute not otherwise
+// specialized (eg; a=mid, a=label).
+class SdpStringAttribute : public SdpAttribute
+{
+public:
+  explicit SdpStringAttribute(AttributeType type, const std::string& value)
+      : SdpAttribute(type), mValue(value)
+  {
+  }
+
+  virtual void Serialize(std::ostream& os) const override;
+
+  std::string mValue;
+};
+
+// Used for any purely (non-negative) numeric attribute
+// Used for any purely (non-negative) numeric attribute (eg; a=ptime).
+class SdpNumberAttribute : public SdpAttribute
+{
+public:
+  explicit SdpNumberAttribute(AttributeType type, uint32_t value = 0)
+      : SdpAttribute(type), mValue(value)
+  {
+  }
+
+  virtual void Serialize(std::ostream& os) const override;
+
+  uint32_t mValue;
+};
+
+} // namespace mozilla
+
+#endif
diff --git a/media/webrtc/signaling/src/sdp/SdpAttributeList.h b/media/webrtc/signaling/src/sdp/SdpAttributeList.h
new file mode 100644
index 000000000..fa84efea6
--- /dev/null
+++ b/media/webrtc/signaling/src/sdp/SdpAttributeList.h
@@ -0,0 +1,94 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _SDPATTRIBUTELIST_H_
+#define _SDPATTRIBUTELIST_H_
+
+#include "mozilla/UniquePtr.h"
+#include "mozilla/Attributes.h"
+
+#include "signaling/src/sdp/SdpAttribute.h"
+
+namespace mozilla
+{
+
+// Abstract interface to the attributes of a session or media section.
+// Media-level lists may fall back to session-level attributes (see
+// |sessionFallback| below).
+class SdpAttributeList
+{
+public:
+  virtual ~SdpAttributeList() {}
+  typedef SdpAttribute::AttributeType AttributeType;
+
+  // Avoid default params on virtual functions
+  // (session fallback defaults to enabled).
+  bool
+  HasAttribute(AttributeType type) const
+  {
+    return HasAttribute(type, true);
+  }
+
+  const SdpAttribute*
+  GetAttribute(AttributeType type) const
+  {
+    return GetAttribute(type, true);
+  }
+
+  virtual bool HasAttribute(AttributeType type, bool sessionFallback) const = 0;
+  virtual const SdpAttribute* GetAttribute(AttributeType type,
+                                           bool sessionFallback) const = 0;
+  // The setter takes an attribute of any type, and takes ownership
+  virtual void SetAttribute(SdpAttribute* attr) = 0;
+  virtual void RemoveAttribute(AttributeType type) = 0;
+  virtual void Clear() = 0;
+
+  // Typed getters for single-instance attributes.
+  virtual const SdpConnectionAttribute& GetConnection() const = 0;
+  virtual const SdpOptionsAttribute& GetIceOptions() const = 0;
+  virtual const SdpRtcpAttribute& GetRtcp() const = 0;
+  virtual const SdpRemoteCandidatesAttribute& GetRemoteCandidates() const = 0;
+  virtual const SdpSetupAttribute& GetSetup() const = 0;
+  virtual const SdpDtlsMessageAttribute& GetDtlsMessage() const = 0;
+
+  // These attributes can appear multiple times, so the returned
+  // classes actually represent a collection of values.
+  virtual const std::vector<std::string>& GetCandidate() const = 0;
+  virtual const SdpExtmapAttributeList& GetExtmap() const = 0;
+  virtual const SdpFingerprintAttributeList& GetFingerprint() const = 0;
+  virtual const SdpFmtpAttributeList& GetFmtp() const = 0;
+  virtual const SdpGroupAttributeList& GetGroup() const = 0;
+  virtual const SdpImageattrAttributeList& GetImageattr() const = 0;
+  virtual const SdpSimulcastAttribute& GetSimulcast() const = 0;
+  virtual const SdpMsidAttributeList& GetMsid() const = 0;
+  virtual const SdpMsidSemanticAttributeList& GetMsidSemantic() const = 0;
+  virtual const SdpRidAttributeList& GetRid() const = 0;
+  virtual const SdpRtcpFbAttributeList& GetRtcpFb() const = 0;
+  virtual const SdpRtpmapAttributeList& GetRtpmap() const = 0;
+  virtual const SdpSctpmapAttributeList& GetSctpmap() const = 0;
+  virtual const SdpSsrcAttributeList& GetSsrc() const = 0;
+  virtual const SdpSsrcGroupAttributeList& GetSsrcGroup() const = 0;
+
+  // These attributes are effectively simple types, so we'll make life
+  // easy by just returning their value.
+  virtual const std::string& GetIcePwd() const = 0;
+  virtual const std::string& GetIceUfrag() const = 0;
+  virtual const std::string& GetIdentity() const = 0;
+  virtual const std::string& GetLabel() const = 0;
+  virtual unsigned int GetMaxptime() const = 0;
+  virtual const std::string& GetMid() const = 0;
+  virtual unsigned int GetPtime() const = 0;
+
+  // This is "special", because it's multiple things
+  // (sendrecv/sendonly/recvonly/inactive all map onto one value).
+  virtual SdpDirectionAttribute::Direction GetDirection() const = 0;
+
+  virtual void Serialize(std::ostream&) const = 0;
+};
+
+// Convenience stream operator; delegates to SdpAttributeList::Serialize.
+inline std::ostream& operator<<(std::ostream& os, const SdpAttributeList& al)
+{
+  al.Serialize(os);
+  return os;
+}
+
+} // namespace mozilla
+
+#endif
diff --git a/media/webrtc/signaling/src/sdp/SdpEnum.h b/media/webrtc/signaling/src/sdp/SdpEnum.h
new file mode 100644
index 000000000..b4a0d16b3
--- /dev/null
+++ b/media/webrtc/signaling/src/sdp/SdpEnum.h
@@ -0,0 +1,70 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _SDPENUM_H_
+#define _SDPENUM_H_
+
+#include <ostream>
+
+#include "mozilla/Assertions.h"
+
+namespace mozilla
+{
+namespace sdp
+{
+
+// SDP network type (the "IN" in c=/o= lines); kNetTypeNone means unset.
+enum NetType { kNetTypeNone, kInternet };
+
+inline std::ostream& operator<<(std::ostream& os, sdp::NetType t)
+{
+  switch (t) {
+    case sdp::kNetTypeNone:
+      // Serializing an unset value is a programming error.
+      MOZ_ASSERT(false);
+      return os << "NONE";
+    case sdp::kInternet:
+      return os << "IN";
+  }
+  MOZ_CRASH("Unknown NetType");
+}
+
+// SDP address type (the "IP4"/"IP6" in c=/o= lines); kAddrTypeNone means
+// unset.
+enum AddrType { kAddrTypeNone, kIPv4, kIPv6 };
+
+inline std::ostream& operator<<(std::ostream& os, sdp::AddrType t)
+{
+  switch (t) {
+    case sdp::kAddrTypeNone:
+      // Serializing an unset value is a programming error.
+      MOZ_ASSERT(false);
+      return os << "NONE";
+    case sdp::kIPv4:
+      return os << "IP4";
+    case sdp::kIPv6:
+      return os << "IP6";
+  }
+  MOZ_CRASH("Unknown AddrType");
+}
+
+enum Direction {
+  // Start at 1 so these can be used as flags
+  kSend = 1,
+  kRecv = 2
+};
+
+inline std::ostream& operator<<(std::ostream& os, sdp::Direction d)
+{
+  switch (d) {
+    case sdp::kSend:
+      return os << "send";
+    case sdp::kRecv:
+      return os << "recv";
+  }
+  // A combined flag value (kSend|kRecv) is not serializable here.
+  MOZ_CRASH("Unknown Direction");
+}
+
+} // namespace sdp
+
+} // namespace mozilla
+
+#endif
diff --git a/media/webrtc/signaling/src/sdp/SdpErrorHolder.h b/media/webrtc/signaling/src/sdp/SdpErrorHolder.h
new file mode 100644
index 000000000..556fcefb6
--- /dev/null
+++ b/media/webrtc/signaling/src/sdp/SdpErrorHolder.h
@@ -0,0 +1,50 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _SDPERRORHOLDER_H_
+#define _SDPERRORHOLDER_H_
+
+#include <vector>
+#include <string>
+
+namespace mozilla
+{
+
+// Mixin that accumulates (line number, message) parse errors for later
+// retrieval by callers.
+class SdpErrorHolder
+{
+public:
+  SdpErrorHolder() {}
+  virtual ~SdpErrorHolder() {}
+
+  void
+  AddParseError(size_t line, const std::string& message)
+  {
+    mErrors.push_back(std::make_pair(line, message));
+  }
+
+  void
+  ClearParseErrors()
+  {
+    mErrors.clear();
+  }
+
+  /**
+   * Returns a reference to the list of parse errors.
+   * This gets cleared out when you call Parse.
+   */
+  const std::vector<std::pair<size_t, std::string> >&
+  GetParseErrors() const
+  {
+    return mErrors;
+  }
+
+private:
+  std::vector<std::pair<size_t, std::string> > mErrors;
+};
+
+} // namespace mozilla
+
+#endif
diff --git a/media/webrtc/signaling/src/sdp/SdpHelper.cpp b/media/webrtc/signaling/src/sdp/SdpHelper.cpp
new file mode 100644
index 000000000..e476b29e5
--- /dev/null
+++ b/media/webrtc/signaling/src/sdp/SdpHelper.cpp
@@ -0,0 +1,811 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "signaling/src/sdp/SdpHelper.h"
+
+#include "signaling/src/sdp/Sdp.h"
+#include "signaling/src/sdp/SdpMediaSection.h"
+#include "logging.h"
+
+#include "nsDebug.h"
+#include "nsError.h"
+#include "prprf.h"
+
+#include <string.h>
+#include <set>
+
+namespace mozilla {
+MOZ_MTLOG_MODULE("sdp")
+
+#define SDP_SET_ERROR(error) \
+ do { \
+ std::ostringstream os; \
+ os << error; \
+ mLastError = os.str(); \
+ MOZ_MTLOG(ML_ERROR, mLastError); \
+ } while (0);
+
+// Copies transport-related details (port, c= line, candidates for up to
+// |numComponents| components, and the rtcp attribute when two components
+// are in use) from |oldLocal| into |newLocal|.
+nsresult
+SdpHelper::CopyTransportParams(size_t numComponents,
+                               const SdpMediaSection& oldLocal,
+                               SdpMediaSection* newLocal)
+{
+  // Copy over m-section details
+  newLocal->SetPort(oldLocal.GetPort());
+  newLocal->GetConnection() = oldLocal.GetConnection();
+
+  const SdpAttributeList& oldLocalAttrs = oldLocal.GetAttributeList();
+  SdpAttributeList& newLocalAttrs = newLocal->GetAttributeList();
+
+  // Now we copy over attributes that won't be added by the usual logic
+  if (oldLocalAttrs.HasAttribute(SdpAttribute::kCandidateAttribute) &&
+      numComponents) {
+    UniquePtr<SdpMultiStringAttribute> candidateAttrs(
+        new SdpMultiStringAttribute(SdpAttribute::kCandidateAttribute));
+    for (const std::string& candidate : oldLocalAttrs.GetCandidate()) {
+      size_t component;
+      nsresult rv = GetComponent(candidate, &component);
+      NS_ENSURE_SUCCESS(rv, rv);
+      // Only keep candidates whose component id is within range.
+      if (numComponents >= component) {
+        candidateAttrs->mValues.push_back(candidate);
+      }
+    }
+    if (candidateAttrs->mValues.size()) {
+      newLocalAttrs.SetAttribute(candidateAttrs.release());
+    }
+  }
+
+  if (numComponents == 2 &&
+      oldLocalAttrs.HasAttribute(SdpAttribute::kRtcpAttribute)) {
+    // copy rtcp attribute if we had one that we are using
+    newLocalAttrs.SetAttribute(new SdpRtcpAttribute(oldLocalAttrs.GetRtcp()));
+  }
+
+  return NS_OK;
+}
+
+// Decides whether the transport params negotiated at |level| in a previous
+// offer/answer exchange can be reused for |newOffer| (ie; neither side is
+// disabled, the old answer wasn't a bundle slave there, the new offer isn't
+// bundle-only at that level, and ICE credentials are unchanged).
+bool
+SdpHelper::AreOldTransportParamsValid(const Sdp& oldAnswer,
+                                      const Sdp& offerersPreviousSdp,
+                                      const Sdp& newOffer,
+                                      size_t level)
+{
+  if (MsectionIsDisabled(oldAnswer.GetMediaSection(level)) ||
+      MsectionIsDisabled(newOffer.GetMediaSection(level))) {
+    // Obvious
+    return false;
+  }
+
+  if (IsBundleSlave(oldAnswer, level)) {
+    // The transport attributes on this m-section were thrown away, because it
+    // was bundled.
+    return false;
+  }
+
+  if (newOffer.GetMediaSection(level).GetAttributeList().HasAttribute(
+        SdpAttribute::kBundleOnlyAttribute) &&
+      IsBundleSlave(newOffer, level)) {
+    // It never makes sense to put transport attributes in a bundle-only
+    // m-section
+    return false;
+  }
+
+  if (IceCredentialsDiffer(newOffer.GetMediaSection(level),
+                           offerersPreviousSdp.GetMediaSection(level))) {
+    return false;
+  }
+
+  return true;
+}
+
+// True if the two m-sections have a different ice-ufrag or ice-pwd
+// (ie; an ICE restart is implied).
+bool
+SdpHelper::IceCredentialsDiffer(const SdpMediaSection& msection1,
+                                const SdpMediaSection& msection2)
+{
+  const SdpAttributeList& attrs1(msection1.GetAttributeList());
+  const SdpAttributeList& attrs2(msection2.GetAttributeList());
+
+  if ((attrs1.GetIceUfrag() != attrs2.GetIceUfrag()) ||
+      (attrs1.GetIcePwd() != attrs2.GetIcePwd())) {
+    return true;
+  }
+
+  return false;
+}
+
+// Extracts the component id (second token) from an ICE candidate string.
+// Returns NS_ERROR_INVALID_ARG (and records an error) if it can't be parsed.
+nsresult
+SdpHelper::GetComponent(const std::string& candidate, size_t* component)
+{
+  unsigned int temp;
+  // "%*s" skips the foundation field; the next token is the component id.
+  int32_t result = PR_sscanf(candidate.c_str(), "%*s %u", &temp);
+  if (result == 1) {
+    *component = temp;
+    return NS_OK;
+  }
+  SDP_SET_ERROR("Malformed ICE candidate: " << candidate);
+  return NS_ERROR_INVALID_ARG;
+}
+
+// An m-section is disabled when its port is 0 and it is not marked
+// bundle-only (bundle-only sections legitimately use port 0).
+bool
+SdpHelper::MsectionIsDisabled(const SdpMediaSection& msection) const
+{
+  return !msection.GetPort() &&
+         !msection.GetAttributeList().HasAttribute(
+             SdpAttribute::kBundleOnlyAttribute);
+}
+
+// Rejects/disables an m-section in place: removes its mid from any group
+// attribute, clears its attributes, marks it inactive with port 0, and
+// installs a single placeholder codec to keep the SDP grammatically valid.
+void
+SdpHelper::DisableMsection(Sdp* sdp, SdpMediaSection* msection)
+{
+  // Make sure to remove the mid from any group attributes
+  if (msection->GetAttributeList().HasAttribute(SdpAttribute::kMidAttribute)) {
+    std::string mid = msection->GetAttributeList().GetMid();
+    if (sdp->GetAttributeList().HasAttribute(SdpAttribute::kGroupAttribute)) {
+      UniquePtr<SdpGroupAttributeList> newGroupAttr(new SdpGroupAttributeList(
+            sdp->GetAttributeList().GetGroup()));
+      newGroupAttr->RemoveMid(mid);
+      sdp->GetAttributeList().SetAttribute(newGroupAttr.release());
+    }
+  }
+
+  // Clear out attributes.
+  msection->GetAttributeList().Clear();
+
+  auto* direction =
+    new SdpDirectionAttribute(SdpDirectionAttribute::kInactive);
+  msection->GetAttributeList().SetAttribute(direction);
+  msection->SetPort(0);
+
+  msection->ClearCodecs();
+
+  auto mediaType = msection->GetMediaType();
+  switch (mediaType) {
+    case SdpMediaSection::kAudio:
+      msection->AddCodec("0", "PCMU", 8000, 1);
+      break;
+    case SdpMediaSection::kVideo:
+      msection->AddCodec("120", "VP8", 90000, 1);
+      break;
+    case SdpMediaSection::kApplication:
+      msection->AddDataChannel("5000", "rejected", 0);
+      break;
+    default:
+      // We need to have something here to fit the grammar, this seems safe
+      // and 19 is a reserved payload type which should not be used by anyone.
+      msection->AddCodec("19", "reserved", 8000, 1);
+  }
+}
+
+// Collects all a=group entries with BUNDLE semantics from the session
+// attributes into |bundleGroups| (appends; does not clear).
+void
+SdpHelper::GetBundleGroups(
+    const Sdp& sdp,
+    std::vector<SdpGroupAttributeList::Group>* bundleGroups) const
+{
+  if (sdp.GetAttributeList().HasAttribute(SdpAttribute::kGroupAttribute)) {
+    for (auto& group : sdp.GetAttributeList().GetGroup().mGroups) {
+      if (group.semantics == SdpGroupAttributeList::kBundle) {
+        bundleGroups->push_back(group);
+      }
+    }
+  }
+}
+
+// Builds a map from each bundled mid to its master (first-tag) m-section.
+// Fails with NS_ERROR_INVALID_ARG on an empty BUNDLE group, a master mid
+// that doesn't resolve or is disabled, or a mid listed twice.
+nsresult
+SdpHelper::GetBundledMids(const Sdp& sdp, BundledMids* bundledMids)
+{
+  std::vector<SdpGroupAttributeList::Group> bundleGroups;
+  GetBundleGroups(sdp, &bundleGroups);
+
+  for (SdpGroupAttributeList::Group& group : bundleGroups) {
+    if (group.tags.empty()) {
+      SDP_SET_ERROR("Empty BUNDLE group");
+      return NS_ERROR_INVALID_ARG;
+    }
+
+    // The first tag in the group identifies the master m-section, which
+    // carries the shared transport.
+    const SdpMediaSection* masterBundleMsection(
+        FindMsectionByMid(sdp, group.tags[0]));
+
+    if (!masterBundleMsection) {
+      SDP_SET_ERROR("mid specified for bundle transport in group attribute"
+          " does not exist in the SDP. (mid=" << group.tags[0] << ")");
+      return NS_ERROR_INVALID_ARG;
+    }
+
+    if (MsectionIsDisabled(*masterBundleMsection)) {
+      SDP_SET_ERROR("mid specified for bundle transport in group attribute"
+          " points at a disabled m-section. (mid=" << group.tags[0] << ")");
+      return NS_ERROR_INVALID_ARG;
+    }
+
+    for (const std::string& mid : group.tags) {
+      if (bundledMids->count(mid)) {
+        SDP_SET_ERROR("mid \'" << mid << "\' appears more than once in a "
+            "BUNDLE group");
+        return NS_ERROR_INVALID_ARG;
+      }
+
+      (*bundledMids)[mid] = masterBundleMsection;
+    }
+  }
+
+  return NS_OK;
+}
+
+// True when the m-section at |level| is bundled but is NOT the master
+// (transport-owning) m-section of its BUNDLE group.
+bool
+SdpHelper::IsBundleSlave(const Sdp& sdp, uint16_t level)
+{
+  auto& msection = sdp.GetMediaSection(level);
+
+  if (!msection.GetAttributeList().HasAttribute(SdpAttribute::kMidAttribute)) {
+    // No mid, definitely no bundle for this m-section
+    return false;
+  }
+  std::string mid(msection.GetAttributeList().GetMid());
+
+  BundledMids bundledMids;
+  nsresult rv = GetBundledMids(sdp, &bundledMids);
+  if (NS_FAILED(rv)) {
+    // Should have been caught sooner.
+    MOZ_ASSERT(false);
+    return false;
+  }
+
+  if (bundledMids.count(mid) && level != bundledMids[mid]->GetLevel()) {
+    // mid is bundled, and isn't the bundle m-section
+    return true;
+  }
+
+  return false;
+}
+
+// Looks up the mid of the m-section at |level|. Fails only on an
+// out-of-range level; if the m-section has no mid attribute, *mid is left
+// untouched and NS_OK is returned.
+nsresult
+SdpHelper::GetMidFromLevel(const Sdp& sdp,
+                           uint16_t level,
+                           std::string* mid)
+{
+  if (level >= sdp.GetMediaSectionCount()) {
+    SDP_SET_ERROR("Index " << level << " out of range");
+    return NS_ERROR_INVALID_ARG;
+  }
+
+  const SdpMediaSection& msection = sdp.GetMediaSection(level);
+  const SdpAttributeList& attrList = msection.GetAttributeList();
+
+  // grab the mid and set the outparam
+  if (attrList.HasAttribute(SdpAttribute::kMidAttribute)) {
+    *mid = attrList.GetMid();
+  }
+
+  return NS_OK;
+}
+
+// Adds a trickled ICE candidate to the m-section identified by |mid| (or by
+// |level| when mid is empty / unresolvable). The "candidate:" prefix is
+// stripped before storing. Fails if level is out of range, the candidate
+// has no ':', or mid and level disagree.
+nsresult
+SdpHelper::AddCandidateToSdp(Sdp* sdp,
+                             const std::string& candidateUntrimmed,
+                             const std::string& mid,
+                             uint16_t level)
+{
+
+  if (level >= sdp->GetMediaSectionCount()) {
+    SDP_SET_ERROR("Index " << level << " out of range");
+    return NS_ERROR_INVALID_ARG;
+  }
+
+  // Trim off '[a=]candidate:'
+  size_t begin = candidateUntrimmed.find(':');
+  if (begin == std::string::npos) {
+    SDP_SET_ERROR("Invalid candidate, no ':' (" << candidateUntrimmed << ")");
+    return NS_ERROR_INVALID_ARG;
+  }
+  ++begin;
+
+  std::string candidate = candidateUntrimmed.substr(begin);
+
+  // https://tools.ietf.org/html/draft-ietf-rtcweb-jsep-11#section-3.4.2.1
+  // Implementations receiving an ICE Candidate object MUST use the MID if
+  // present, or the m= line index, if not (as it could have come from a
+  // non-JSEP endpoint). (bug 1095793)
+  SdpMediaSection* msection = 0;
+  if (!mid.empty()) {
+    // FindMsectionByMid could return nullptr
+    msection = FindMsectionByMid(*sdp, mid);
+
+    // Check to make sure mid matches what we'd get by
+    // looking up the m= line using the level. (mjf)
+    std::string checkMid;
+    nsresult rv = GetMidFromLevel(*sdp, level, &checkMid);
+    if (NS_FAILED(rv)) {
+      return rv;
+    }
+    if (mid != checkMid) {
+      SDP_SET_ERROR("Mismatch between mid and level - \"" << mid
+                     << "\" is not the mid for level " << level
+                     << "; \"" << checkMid << "\" is");
+      return NS_ERROR_INVALID_ARG;
+    }
+  }
+  // Fall back to level-based lookup when mid was empty or not found.
+  if (!msection) {
+    msection = &(sdp->GetMediaSection(level));
+  }
+
+  SdpAttributeList& attrList = msection->GetAttributeList();
+
+  UniquePtr<SdpMultiStringAttribute> candidates;
+  if (!attrList.HasAttribute(SdpAttribute::kCandidateAttribute)) {
+    // Create new
+    candidates.reset(
+        new SdpMultiStringAttribute(SdpAttribute::kCandidateAttribute));
+  } else {
+    // Copy existing
+    candidates.reset(new SdpMultiStringAttribute(
+        *static_cast<const SdpMultiStringAttribute*>(
+            attrList.GetAttribute(SdpAttribute::kCandidateAttribute))));
+  }
+  candidates->PushEntry(candidate);
+  attrList.SetAttribute(candidates.release());
+
+  return NS_OK;
+}
+
+// Marks ICE gathering complete on the m-section at |level|: adds
+// a=end-of-candidates and drops ice-options (trickle). Slave bundle
+// m-sections are skipped since they carry no transport.
+void
+SdpHelper::SetIceGatheringComplete(Sdp* sdp,
+                                   uint16_t level,
+                                   BundledMids bundledMids)
+{
+  SdpMediaSection& msection = sdp->GetMediaSection(level);
+
+  if (kSlaveBundle == GetMsectionBundleType(*sdp,
+                                            level,
+                                            bundledMids,
+                                            nullptr)) {
+    return; // Slave bundle m-section. Skip.
+  }
+
+  SdpAttributeList& attrs = msection.GetAttributeList();
+  attrs.SetAttribute(
+      new SdpFlagAttribute(SdpAttribute::kEndOfCandidatesAttribute));
+  // Remove trickle-ice option
+  attrs.RemoveAttribute(SdpAttribute::kIceOptionsAttribute);
+}
+
+// Applies the default candidate addresses/ports to the m-section at
+// |level|. Slave bundle m-sections are skipped; for a master bundle
+// m-section the defaults are propagated to every m-section bundled with it.
+// NOTE(review): |bundledMids| is taken by value (copied each call).
+void
+SdpHelper::SetDefaultAddresses(const std::string& defaultCandidateAddr,
+                               uint16_t defaultCandidatePort,
+                               const std::string& defaultRtcpCandidateAddr,
+                               uint16_t defaultRtcpCandidatePort,
+                               Sdp* sdp,
+                               uint16_t level,
+                               BundledMids bundledMids)
+{
+  SdpMediaSection& msection = sdp->GetMediaSection(level);
+  std::string masterMid;
+
+  MsectionBundleType bundleType = GetMsectionBundleType(*sdp,
+                                                        level,
+                                                        bundledMids,
+                                                        &masterMid);
+  if (kSlaveBundle == bundleType) {
+    return; // Slave bundle m-section. Skip.
+  }
+  if (kMasterBundle == bundleType) {
+    // Master bundle m-section. Set defaultCandidateAddr and
+    // defaultCandidatePort on all bundled m-sections.
+    const SdpMediaSection* masterBundleMsection(bundledMids[masterMid]);
+    for (auto i = bundledMids.begin(); i != bundledMids.end(); ++i) {
+      // Only touch m-sections bundled under this master.
+      if (i->second != masterBundleMsection) {
+        continue;
+      }
+      SdpMediaSection* bundledMsection = FindMsectionByMid(*sdp, i->first);
+      if (!bundledMsection) {
+        MOZ_ASSERT(false);
+        continue;
+      }
+      SetDefaultAddresses(defaultCandidateAddr,
+                          defaultCandidatePort,
+                          defaultRtcpCandidateAddr,
+                          defaultRtcpCandidatePort,
+                          bundledMsection);
+    }
+  }
+
+  SetDefaultAddresses(defaultCandidateAddr,
+                      defaultCandidatePort,
+                      defaultRtcpCandidateAddr,
+                      defaultRtcpCandidatePort,
+                      &msection);
+}
+
+// Applies defaults to a single m-section: sets the c= address and m= port,
+// and (when an RTCP address is provided) an a=rtcp attribute, inferring
+// IPv6 from the presence of ':' in the address.
+void
+SdpHelper::SetDefaultAddresses(const std::string& defaultCandidateAddr,
+                               uint16_t defaultCandidatePort,
+                               const std::string& defaultRtcpCandidateAddr,
+                               uint16_t defaultRtcpCandidatePort,
+                               SdpMediaSection* msection)
+{
+  msection->GetConnection().SetAddress(defaultCandidateAddr);
+  msection->SetPort(defaultCandidatePort);
+
+  if (!defaultRtcpCandidateAddr.empty()) {
+    sdp::AddrType ipVersion = sdp::kIPv4;
+    if (defaultRtcpCandidateAddr.find(':') != std::string::npos) {
+      ipVersion = sdp::kIPv6;
+    }
+    msection->GetAttributeList().SetAttribute(new SdpRtcpAttribute(
+          defaultRtcpCandidatePort,
+          sdp::kInternet,
+          ipVersion,
+          defaultRtcpCandidateAddr));
+  }
+}
+
+// Extracts the (streamId, trackId) pair from the webrtc msids of |msection|.
+// Only msids covered by the session-level a=msid-semantic:WMS declaration
+// (either listed explicitly or via the "*" wildcard) are considered.
+// Returns:
+//  - NS_ERROR_NOT_AVAILABLE if there is no msid-semantic attribute or no
+//    matching webrtc msid in this m-section;
+//  - NS_ERROR_INVALID_ARG if a webrtc msid lacks a track id (appdata) or if
+//    conflicting msids are found;
+//  - NS_OK otherwise, with the outparams filled in.
+nsresult
+SdpHelper::GetIdsFromMsid(const Sdp& sdp,
+ const SdpMediaSection& msection,
+ std::string* streamId,
+ std::string* trackId)
+{
+ if (!sdp.GetAttributeList().HasAttribute(
+ SdpAttribute::kMsidSemanticAttribute)) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ auto& msidSemantics = sdp.GetAttributeList().GetMsidSemantic().mMsidSemantics;
+ std::vector<SdpMsidAttributeList::Msid> allMsids;
+ nsresult rv = GetMsids(msection, &allMsids);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ bool allMsidsAreWebrtc = false;
+ std::set<std::string> webrtcMsids;
+
+ // Collect the set of stream ids declared under the "WMS" semantic. Only the
+ // first WMS entry is honored (hence the break).
+ for (auto i = msidSemantics.begin(); i != msidSemantics.end(); ++i) {
+ if (i->semantic == "WMS") {
+ for (auto j = i->msids.begin(); j != i->msids.end(); ++j) {
+ if (*j == "*") {
+ allMsidsAreWebrtc = true;
+ } else {
+ webrtcMsids.insert(*j);
+ }
+ }
+ break;
+ }
+ }
+
+ bool found = false;
+
+ for (auto i = allMsids.begin(); i != allMsids.end(); ++i) {
+ if (allMsidsAreWebrtc || webrtcMsids.count(i->identifier)) {
+ // Webrtc msids must carry a track id in the appdata field.
+ if (i->appdata.empty()) {
+ SDP_SET_ERROR("Invalid webrtc msid at level " << msection.GetLevel()
+ << ": Missing track id.");
+ return NS_ERROR_INVALID_ARG;
+ }
+ if (!found) {
+ *streamId = i->identifier;
+ *trackId = i->appdata;
+ found = true;
+ } else if ((*streamId != i->identifier) || (*trackId != i->appdata)) {
+ // Duplicate identical msids are tolerated; different ones are not.
+ SDP_SET_ERROR("Found multiple different webrtc msids in m-section "
+ << msection.GetLevel() << ". The behavior here is "
+ "undefined.");
+ return NS_ERROR_INVALID_ARG;
+ }
+ }
+ }
+
+ if (!found) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ return NS_OK;
+}
+
+// Collects every msid visible in |msection| into |msids|: first any plain
+// a=msid attributes, then msids embedded in a=ssrc attributes ("msid:...").
+// The ssrc fallback exists because Chrome historically emitted only the
+// ssrc-level form. Returns a parse error from ParseMsid on malformed input.
+nsresult
+SdpHelper::GetMsids(const SdpMediaSection& msection,
+ std::vector<SdpMsidAttributeList::Msid>* msids)
+{
+ if (msection.GetAttributeList().HasAttribute(SdpAttribute::kMsidAttribute)) {
+ *msids = msection.GetAttributeList().GetMsid().mMsids;
+ }
+
+ // Can we find some additional msids in ssrc attributes?
+ // (Chrome does not put plain-old msid attributes in its SDP)
+ if (msection.GetAttributeList().HasAttribute(SdpAttribute::kSsrcAttribute)) {
+ auto& ssrcs = msection.GetAttributeList().GetSsrc().mSsrcs;
+
+ for (auto i = ssrcs.begin(); i != ssrcs.end(); ++i) {
+ if (i->attribute.find("msid:") == 0) {
+ std::string streamId;
+ std::string trackId;
+ nsresult rv = ParseMsid(i->attribute, &streamId, &trackId);
+ NS_ENSURE_SUCCESS(rv, rv);
+ msids->push_back({streamId, trackId});
+ }
+ }
+ }
+
+ return NS_OK;
+}
+
+// Parses an "msid:<streamId> [<trackId>]" string (as found inside an a=ssrc
+// attribute) into its two tokens. The trackId/appdata token is optional; when
+// absent, |*trackId| is set to the empty string. Returns NS_ERROR_INVALID_ARG
+// when no streamId token is present at all.
+nsresult
+SdpHelper::ParseMsid(const std::string& msidAttribute,
+ std::string* streamId,
+ std::string* trackId)
+{
+ // Would be nice if SdpSsrcAttributeList could parse out the contained
+ // attribute, but at least the parse here is simple.
+ // We are being very forgiving here wrt whitespace; tabs are not actually
+ // allowed, nor is leading/trailing whitespace.
+ // The hard-coded 5 skips the "msid:" prefix (strlen("msid:")).
+ size_t streamIdStart = msidAttribute.find_first_not_of(" \t", 5);
+ // We do not assume the appdata token is here, since this is not
+ // necessarily a webrtc msid
+ if (streamIdStart == std::string::npos) {
+ SDP_SET_ERROR("Malformed source-level msid attribute: "
+ << msidAttribute);
+ return NS_ERROR_INVALID_ARG;
+ }
+
+ size_t streamIdEnd = msidAttribute.find_first_of(" \t", streamIdStart);
+ if (streamIdEnd == std::string::npos) {
+ streamIdEnd = msidAttribute.size();
+ }
+
+ // If there is no appdata token, both start and end clamp to size(), which
+ // yields an empty trackId below.
+ size_t trackIdStart =
+ msidAttribute.find_first_not_of(" \t", streamIdEnd);
+ if (trackIdStart == std::string::npos) {
+ trackIdStart = msidAttribute.size();
+ }
+
+ size_t trackIdEnd = msidAttribute.find_first_of(" \t", trackIdStart);
+ if (trackIdEnd == std::string::npos) {
+ trackIdEnd = msidAttribute.size();
+ }
+
+ size_t streamIdSize = streamIdEnd - streamIdStart;
+ size_t trackIdSize = trackIdEnd - trackIdStart;
+
+ *streamId = msidAttribute.substr(streamIdStart, streamIdSize);
+ *trackId = msidAttribute.substr(trackIdStart, trackIdSize);
+ return NS_OK;
+}
+
+// Installs a session-level a=msid-semantic:WMS attribute listing |msids|.
+// Does nothing when |msids| is empty (no attribute is emitted).
+void
+SdpHelper::SetupMsidSemantic(const std::vector<std::string>& msids,
+ Sdp* sdp) const
+{
+ if (!msids.empty()) {
+ UniquePtr<SdpMsidSemanticAttributeList> msidSemantics(
+ new SdpMsidSemanticAttributeList);
+ msidSemantics->PushEntry("WMS", msids);
+ // SetAttribute takes ownership of the released pointer.
+ sdp->GetAttributeList().SetAttribute(msidSemantics.release());
+ }
+}
+
+// Returns the cname carried in the first "cname:"-prefixed a=ssrc attribute
+// of |msection|, or the empty string if none is present.
+std::string
+SdpHelper::GetCNAME(const SdpMediaSection& msection) const
+{
+ if (msection.GetAttributeList().HasAttribute(SdpAttribute::kSsrcAttribute)) {
+ auto& ssrcs = msection.GetAttributeList().GetSsrc().mSsrcs;
+ for (auto i = ssrcs.begin(); i != ssrcs.end(); ++i) {
+ if (i->attribute.find("cname:") == 0) {
+ // 6 == strlen("cname:"); return everything after the prefix.
+ return i->attribute.substr(6);
+ }
+ }
+ }
+ return "";
+}
+
+// Linear search for the m-section whose a=mid attribute equals |mid|.
+// Returns nullptr when no m-section carries that mid.
+const SdpMediaSection*
+SdpHelper::FindMsectionByMid(const Sdp& sdp,
+ const std::string& mid) const
+{
+ for (size_t i = 0; i < sdp.GetMediaSectionCount(); ++i) {
+ auto& attrs = sdp.GetMediaSection(i).GetAttributeList();
+ if (attrs.HasAttribute(SdpAttribute::kMidAttribute) &&
+ attrs.GetMid() == mid) {
+ return &sdp.GetMediaSection(i);
+ }
+ }
+ return nullptr;
+}
+
+// Non-const variant of FindMsectionByMid. Delegates to the const overload so
+// the lookup logic exists in exactly one place; the const_cast is safe
+// because the |sdp| argument passed in is itself non-const.
+SdpMediaSection*
+SdpHelper::FindMsectionByMid(Sdp& sdp,
+ const std::string& mid) const
+{
+ return const_cast<SdpMediaSection*>(
+ FindMsectionByMid(static_cast<const Sdp&>(sdp), mid));
+}
+
+// Copies attributes from |source| to |dest| that must survive renegotiation:
+// a=rtcp-mux and a=mid. Always returns NS_OK (nsresult kept for signature
+// consistency with the other Copy* helpers).
+nsresult
+SdpHelper::CopyStickyParams(const SdpMediaSection& source,
+ SdpMediaSection* dest)
+{
+ auto& sourceAttrs = source.GetAttributeList();
+ auto& destAttrs = dest->GetAttributeList();
+
+ // There's no reason to renegotiate rtcp-mux
+ if (sourceAttrs.HasAttribute(SdpAttribute::kRtcpMuxAttribute)) {
+ destAttrs.SetAttribute(
+ new SdpFlagAttribute(SdpAttribute::kRtcpMuxAttribute));
+ }
+
+ // mid should stay the same
+ if (sourceAttrs.HasAttribute(SdpAttribute::kMidAttribute)) {
+ destAttrs.SetAttribute(
+ new SdpStringAttribute(SdpAttribute::kMidAttribute,
+ sourceAttrs.GetMid()));
+ }
+
+ return NS_OK;
+}
+
+// Returns true iff |proto| is an RTP profile with feedback (AVPF/SAVPF
+// family), i.e. one for which RTCP is used. The switch is intentionally
+// exhaustive with no default so the compiler flags any newly added Protocol
+// value that is not handled here.
+bool
+SdpHelper::HasRtcp(SdpMediaSection::Protocol proto) const
+{
+ switch (proto) {
+ case SdpMediaSection::kRtpAvpf:
+ case SdpMediaSection::kDccpRtpAvpf:
+ case SdpMediaSection::kDccpRtpSavpf:
+ case SdpMediaSection::kRtpSavpf:
+ case SdpMediaSection::kUdpTlsRtpSavpf:
+ case SdpMediaSection::kTcpTlsRtpSavpf:
+ case SdpMediaSection::kDccpTlsRtpSavpf:
+ return true;
+ case SdpMediaSection::kRtpAvp:
+ case SdpMediaSection::kUdp:
+ case SdpMediaSection::kVat:
+ case SdpMediaSection::kRtp:
+ case SdpMediaSection::kUdptl:
+ case SdpMediaSection::kTcp:
+ case SdpMediaSection::kTcpRtpAvp:
+ case SdpMediaSection::kRtpSavp:
+ case SdpMediaSection::kTcpBfcp:
+ case SdpMediaSection::kTcpTlsBfcp:
+ case SdpMediaSection::kTcpTls:
+ case SdpMediaSection::kFluteUdp:
+ case SdpMediaSection::kTcpMsrp:
+ case SdpMediaSection::kTcpTlsMsrp:
+ case SdpMediaSection::kDccp:
+ case SdpMediaSection::kDccpRtpAvp:
+ case SdpMediaSection::kDccpRtpSavp:
+ case SdpMediaSection::kUdpTlsRtpSavp:
+ case SdpMediaSection::kTcpTlsRtpSavp:
+ case SdpMediaSection::kDccpTlsRtpSavp:
+ case SdpMediaSection::kUdpMbmsFecRtpAvp:
+ case SdpMediaSection::kUdpMbmsFecRtpSavp:
+ case SdpMediaSection::kUdpMbmsRepair:
+ case SdpMediaSection::kFecUdp:
+ case SdpMediaSection::kUdpFec:
+ case SdpMediaSection::kTcpMrcpv2:
+ case SdpMediaSection::kTcpTlsMrcpv2:
+ case SdpMediaSection::kPstn:
+ case SdpMediaSection::kUdpTlsUdptl:
+ case SdpMediaSection::kSctp:
+ case SdpMediaSection::kSctpDtls:
+ case SdpMediaSection::kDtlsSctp:
+ return false;
+ }
+ // Reachable only if the enum value is out of range (memory corruption).
+ MOZ_CRASH("Unknown protocol, probably corruption.");
+}
+
+// Chooses the transport protocol we offer for a given media type:
+// data channels get DTLS/SCTP, audio/video get UDP/TLS/RTP/SAVPF.
+SdpMediaSection::Protocol
+SdpHelper::GetProtocolForMediaType(SdpMediaSection::MediaType type)
+{
+ if (type == SdpMediaSection::kApplication) {
+ return SdpMediaSection::kDtlsSctp;
+ }
+
+ return SdpMediaSection::kUdpTlsRtpSavpf;
+}
+
+void
+SdpHelper::appendSdpParseErrors(
+ const std::vector<std::pair<size_t, std::string> >& aErrors,
+ std::string* aErrorString)
+{
+ std::ostringstream os;
+ for (auto i = aErrors.begin(); i != aErrors.end(); ++i) {
+ os << "SDP Parse Error on line " << i->first << ": " + i->second
+ << std::endl;
+ }
+ *aErrorString += os.str();
+}
+
+// Parses a payload-type string as an unsigned integer in [0, UINT16_MAX].
+// Returns false (leaving |*ptOutparam| untouched) on any malformed input.
+/* static */ bool
+SdpHelper::GetPtAsInt(const std::string& ptString, uint16_t* ptOutparam)
+{
+ // Reject the empty string and anything not starting with a digit. Without
+ // this guard, strtoul would accept "" (yielding 0), leading whitespace,
+ // and sign prefixes such as "+12" or "-0", none of which are valid
+ // payload types.
+ if (ptString.empty() || ptString[0] < '0' || ptString[0] > '9') {
+ return false;
+ }
+ char* end;
+ unsigned long pt = strtoul(ptString.c_str(), &end, 10);
+ size_t length = static_cast<size_t>(end - ptString.c_str());
+ // The entire string must have been consumed, and the value must fit in
+ // 16 bits.
+ if ((pt > UINT16_MAX) || (length != ptString.size())) {
+ return false;
+ }
+ *ptOutparam = static_cast<uint16_t>(pt);
+ return true;
+}
+
+// Intersects the remote m-section's a=extmap header extensions with our
+// locally supported |localExtensions| and installs the common set on
+// |localMsection|, with directions flipped to answer the remote's offer.
+// Extensions with incompatible directions (or kInactive) are simply omitted.
+void
+SdpHelper::AddCommonExtmaps(
+ const SdpMediaSection& remoteMsection,
+ const std::vector<SdpExtmapAttributeList::Extmap>& localExtensions,
+ SdpMediaSection* localMsection)
+{
+ if (!remoteMsection.GetAttributeList().HasAttribute(
+ SdpAttribute::kExtmapAttribute)) {
+ return;
+ }
+
+ UniquePtr<SdpExtmapAttributeList> localExtmap(new SdpExtmapAttributeList);
+ auto& theirExtmap = remoteMsection.GetAttributeList().GetExtmap().mExtmaps;
+ for (auto i = theirExtmap.begin(); i != theirExtmap.end(); ++i) {
+ for (auto j = localExtensions.begin(); j != localExtensions.end(); ++j) {
+ // verify we have a valid combination of directions. For kInactive
+ // we'll just not add the response
+ // (their send side must meet our recv side, or vice versa).
+ if (i->extensionname == j->extensionname &&
+ (((i->direction == SdpDirectionAttribute::Direction::kSendrecv ||
+ i->direction == SdpDirectionAttribute::Direction::kSendonly) &&
+ (j->direction == SdpDirectionAttribute::Direction::kSendrecv ||
+ j->direction == SdpDirectionAttribute::Direction::kRecvonly)) ||
+
+ ((i->direction == SdpDirectionAttribute::Direction::kSendrecv ||
+ i->direction == SdpDirectionAttribute::Direction::kRecvonly) &&
+ (j->direction == SdpDirectionAttribute::Direction::kSendrecv ||
+ j->direction == SdpDirectionAttribute::Direction::kSendonly)))) {
+ auto k = *i; // we need to modify it
+ // If we only support one direction, answer with the mirrored
+ // direction of our capability.
+ if (j->direction == SdpDirectionAttribute::Direction::kSendonly) {
+ k.direction = SdpDirectionAttribute::Direction::kRecvonly;
+ } else if (j->direction == SdpDirectionAttribute::Direction::kRecvonly) {
+ k.direction = SdpDirectionAttribute::Direction::kSendonly;
+ }
+ localExtmap->mExtmaps.push_back(k);
+
+ // RFC 5285 says that ids >= 4096 can be used by the offerer to
+ // force the answerer to pick, otherwise the value in the offer is
+ // used.
+ if (localExtmap->mExtmaps.back().entry >= 4096) {
+ localExtmap->mExtmaps.back().entry = j->entry;
+ }
+ }
+ }
+ }
+
+ if (!localExtmap->mExtmaps.empty()) {
+ localMsection->GetAttributeList().SetAttribute(localExtmap.release());
+ }
+}
+
+// Classifies the m-section at |level| with respect to BUNDLE:
+//  - kNoBundle: no mid, or mid not part of any a=group:BUNDLE;
+//  - kMasterBundle: this m-section is the bundle master (first in the group);
+//    |*masterMid| (if non-null) receives its mid;
+//  - kSlaveBundle: bundled under some other master m-section.
+SdpHelper::MsectionBundleType
+SdpHelper::GetMsectionBundleType(const Sdp& sdp,
+ uint16_t level,
+ BundledMids& bundledMids,
+ std::string* masterMid) const
+{
+ const SdpMediaSection& msection = sdp.GetMediaSection(level);
+ if (msection.GetAttributeList().HasAttribute(SdpAttribute::kMidAttribute)) {
+ std::string mid(msection.GetAttributeList().GetMid());
+ if (bundledMids.count(mid)) {
+ const SdpMediaSection* masterBundleMsection(bundledMids[mid]);
+ // If our level differs from the master's level, someone else is the
+ // master of this bundle.
+ if (msection.GetLevel() != masterBundleMsection->GetLevel()) {
+ return kSlaveBundle;
+ }
+
+ // allow the caller not to care about the masterMid
+ if (masterMid) {
+ *masterMid = mid;
+ }
+ return kMasterBundle;
+ }
+ }
+ return kNoBundle;
+}
+
+} // namespace mozilla
+
+
diff --git a/media/webrtc/signaling/src/sdp/SdpHelper.h b/media/webrtc/signaling/src/sdp/SdpHelper.h
new file mode 100644
index 000000000..21940f280
--- /dev/null
+++ b/media/webrtc/signaling/src/sdp/SdpHelper.h
@@ -0,0 +1,131 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _SDPHELPER_H_
+#define _SDPHELPER_H_
+
+#include "nsError.h"
+
+#include "signaling/src/sdp/SdpMediaSection.h"
+#include "signaling/src/sdp/SdpAttribute.h"
+
+#include "m_cpp_utils.h"
+
+#include <string>
+#include <map>
+#include <vector>
+
+namespace mozilla {
+class SdpMediaSection;
+class Sdp;
+
+// Stateless-ish utility collection for inspecting and mutating parsed SDP
+// (bundle bookkeeping, msid handling, candidate/default-address plumbing,
+// attribute copying). Error text is appended to a caller-owned string.
+class SdpHelper {
+ public:
+ // How an m-section relates to an a=group:BUNDLE declaration.
+ enum MsectionBundleType {
+ kNoBundle,
+ kSlaveBundle,
+ kMasterBundle
+ };
+
+ // Takes a std::string* into which error strings will be written for the
+ // lifetime of the SdpHelper.
+ // NOTE: mLastError is a reference; |errorDest| must outlive this object.
+ explicit SdpHelper(std::string* errorDest) : mLastError(*errorDest) {}
+ ~SdpHelper() {}
+
+ nsresult GetComponent(const std::string& candidate, size_t* component);
+ nsresult CopyTransportParams(size_t numComponents,
+ const SdpMediaSection& source,
+ SdpMediaSection* dest);
+ bool AreOldTransportParamsValid(const Sdp& oldAnswer,
+ const Sdp& offerersPreviousSdp,
+ const Sdp& newOffer,
+ size_t level);
+ bool IceCredentialsDiffer(const SdpMediaSection& msection1,
+ const SdpMediaSection& msection2);
+
+ bool MsectionIsDisabled(const SdpMediaSection& msection) const;
+ static void DisableMsection(Sdp* sdp, SdpMediaSection* msection);
+
+ // Maps each mid to the m-section that is the master of its bundle.
+ // Mids that do not appear in an a=group:BUNDLE do not appear here.
+ typedef std::map<std::string, const SdpMediaSection*> BundledMids;
+
+ nsresult GetBundledMids(const Sdp& sdp, BundledMids* bundledMids);
+
+ bool IsBundleSlave(const Sdp& localSdp, uint16_t level);
+ void GetBundleGroups(
+ const Sdp& sdp,
+ std::vector<SdpGroupAttributeList::Group>* groups) const;
+
+ nsresult GetMidFromLevel(const Sdp& sdp,
+ uint16_t level,
+ std::string* mid);
+ // msid helpers: extract stream/track ids from a=msid and a=ssrc msid forms.
+ nsresult GetIdsFromMsid(const Sdp& sdp,
+ const SdpMediaSection& msection,
+ std::string* streamId,
+ std::string* trackId);
+ nsresult GetMsids(const SdpMediaSection& msection,
+ std::vector<SdpMsidAttributeList::Msid>* msids);
+ nsresult ParseMsid(const std::string& msidAttribute,
+ std::string* streamId,
+ std::string* trackId);
+ // ICE candidate / default address plumbing.
+ nsresult AddCandidateToSdp(Sdp* sdp,
+ const std::string& candidate,
+ const std::string& mid,
+ uint16_t level);
+ void SetIceGatheringComplete(Sdp* sdp,
+ uint16_t level,
+ BundledMids bundledMids);
+ void SetDefaultAddresses(const std::string& defaultCandidateAddr,
+ uint16_t defaultCandidatePort,
+ const std::string& defaultRtcpCandidateAddr,
+ uint16_t defaultRtcpCandidatePort,
+ Sdp* sdp,
+ uint16_t level,
+ BundledMids bundledMids);
+ void SetDefaultAddresses(const std::string& defaultCandidateAddr,
+ uint16_t defaultCandidatePort,
+ const std::string& defaultRtcpCandidateAddr,
+ uint16_t defaultRtcpCandidatePort,
+ SdpMediaSection* msection);
+ void SetupMsidSemantic(const std::vector<std::string>& msids,
+ Sdp* sdp) const;
+ MsectionBundleType GetMsectionBundleType(const Sdp& sdp,
+ uint16_t level,
+ BundledMids& bundledMids,
+ std::string* masterMid) const;
+
+ std::string GetCNAME(const SdpMediaSection& msection) const;
+
+ SdpMediaSection* FindMsectionByMid(Sdp& sdp,
+ const std::string& mid) const;
+
+ const SdpMediaSection* FindMsectionByMid(const Sdp& sdp,
+ const std::string& mid) const;
+
+ nsresult CopyStickyParams(const SdpMediaSection& source,
+ SdpMediaSection* dest);
+ bool HasRtcp(SdpMediaSection::Protocol proto) const;
+ static SdpMediaSection::Protocol GetProtocolForMediaType(
+ SdpMediaSection::MediaType type);
+ void appendSdpParseErrors(
+ const std::vector<std::pair<size_t, std::string> >& aErrors,
+ std::string* aErrorString);
+
+ static bool GetPtAsInt(const std::string& ptString, uint16_t* ptOutparam);
+
+ void AddCommonExtmaps(
+ const SdpMediaSection& remoteMsection,
+ const std::vector<SdpExtmapAttributeList::Extmap>& localExtensions,
+ SdpMediaSection* localMsection);
+
+ private:
+ // Reference to a caller-owned error string; see constructor note.
+ std::string& mLastError;
+
+ DISALLOW_COPY_ASSIGN(SdpHelper);
+};
+} // namespace mozilla
+
+#endif // _SDPHELPER_H_
+
diff --git a/media/webrtc/signaling/src/sdp/SdpMediaSection.cpp b/media/webrtc/signaling/src/sdp/SdpMediaSection.cpp
new file mode 100644
index 000000000..bd3fab555
--- /dev/null
+++ b/media/webrtc/signaling/src/sdp/SdpMediaSection.cpp
@@ -0,0 +1,196 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "signaling/src/sdp/SdpMediaSection.h"
+
+namespace mozilla
+{
+// Returns the parsed fmtp parameters for payload type |pt|, or nullptr if
+// there is no a=fmtp for that format (or its parameters failed to parse).
+// The returned pointer is owned by the attribute list; do not store it past
+// any mutation of the attributes.
+const SdpFmtpAttributeList::Parameters*
+SdpMediaSection::FindFmtp(const std::string& pt) const
+{
+ const SdpAttributeList& attrs = GetAttributeList();
+
+ if (attrs.HasAttribute(SdpAttribute::kFmtpAttribute)) {
+ for (auto& fmtpAttr : attrs.GetFmtp().mFmtps) {
+ if (fmtpAttr.format == pt && fmtpAttr.parameters) {
+ return fmtpAttr.parameters.get();
+ }
+ }
+ }
+ return nullptr;
+}
+
+// Inserts or replaces the a=fmtp entry whose format matches |fmtpToSet|.
+// The whole fmtp list is copied, edited, and written back as a new attribute.
+void
+SdpMediaSection::SetFmtp(const SdpFmtpAttributeList::Fmtp& fmtpToSet)
+{
+ UniquePtr<SdpFmtpAttributeList> fmtps(new SdpFmtpAttributeList);
+
+ if (GetAttributeList().HasAttribute(SdpAttribute::kFmtpAttribute)) {
+ *fmtps = GetAttributeList().GetFmtp();
+ }
+
+ bool found = false;
+ for (SdpFmtpAttributeList::Fmtp& fmtp : fmtps->mFmtps) {
+ if (fmtp.format == fmtpToSet.format) {
+ fmtp = fmtpToSet;
+ found = true;
+ }
+ }
+
+ if (!found) {
+ fmtps->mFmtps.push_back(fmtpToSet);
+ }
+
+ GetAttributeList().SetAttribute(fmtps.release());
+}
+
+// Removes the first a=fmtp entry whose format equals |pt|. The (possibly
+// now-empty) list is written back unconditionally, replacing the previous
+// fmtp attribute.
+void
+SdpMediaSection::RemoveFmtp(const std::string& pt)
+{
+ UniquePtr<SdpFmtpAttributeList> fmtps(new SdpFmtpAttributeList);
+
+ SdpAttributeList& attrList = GetAttributeList();
+ if (attrList.HasAttribute(SdpAttribute::kFmtpAttribute)) {
+ *fmtps = attrList.GetFmtp();
+ }
+
+ for (size_t i = 0; i < fmtps->mFmtps.size(); ++i) {
+ if (pt == fmtps->mFmtps[i].format) {
+ fmtps->mFmtps.erase(fmtps->mFmtps.begin() + i);
+ break;
+ }
+ }
+
+ attrList.SetAttribute(fmtps.release());
+}
+
+// Looks up the a=rtpmap entry for payload type |pt|; nullptr if absent.
+// Returned pointer is owned by the attribute list.
+const SdpRtpmapAttributeList::Rtpmap*
+SdpMediaSection::FindRtpmap(const std::string& pt) const
+{
+ auto& attrs = GetAttributeList();
+ if (!attrs.HasAttribute(SdpAttribute::kRtpmapAttribute)) {
+ return nullptr;
+ }
+
+ const SdpRtpmapAttributeList& rtpmap = attrs.GetRtpmap();
+ if (!rtpmap.HasEntry(pt)) {
+ return nullptr;
+ }
+
+ return &rtpmap.GetEntry(pt);
+}
+
+// Looks up the a=sctpmap entry for |pt| (SCTP port number as a string);
+// nullptr if absent. Returned pointer is owned by the attribute list.
+const SdpSctpmapAttributeList::Sctpmap*
+SdpMediaSection::FindSctpmap(const std::string& pt) const
+{
+ auto& attrs = GetAttributeList();
+ if (!attrs.HasAttribute(SdpAttribute::kSctpmapAttribute)) {
+ return nullptr;
+ }
+
+ const SdpSctpmapAttributeList& sctpmap = attrs.GetSctpmap();
+ if (!sctpmap.HasEntry(pt)) {
+ return nullptr;
+ }
+
+ return &sctpmap.GetEntry(pt);
+}
+
+// True iff an a=rtcp-fb of the given |type| and |subType| applies to payload
+// type |pt| — either targeted at |pt| directly or at the "*" wildcard.
+bool
+SdpMediaSection::HasRtcpFb(const std::string& pt,
+ SdpRtcpFbAttributeList::Type type,
+ const std::string& subType) const
+{
+ const SdpAttributeList& attrs(GetAttributeList());
+
+ if (!attrs.HasAttribute(SdpAttribute::kRtcpFbAttribute)) {
+ return false;
+ }
+
+ for (auto& rtcpfb : attrs.GetRtcpFb().mFeedbacks) {
+ if (rtcpfb.type == type) {
+ if (rtcpfb.pt == "*" || rtcpfb.pt == pt) {
+ if (rtcpfb.parameter == subType) {
+ return true;
+ }
+ }
+ }
+ }
+
+ return false;
+}
+
+// Returns a copy of this m-section's a=rtcp-fb list; an empty list when the
+// attribute is absent.
+SdpRtcpFbAttributeList
+SdpMediaSection::GetRtcpFbs() const
+{
+ SdpRtcpFbAttributeList result;
+ if (GetAttributeList().HasAttribute(SdpAttribute::kRtcpFbAttribute)) {
+ result = GetAttributeList().GetRtcpFb();
+ }
+ return result;
+}
+
+// Replaces this m-section's a=rtcp-fb list wholesale; an empty input list
+// removes the attribute entirely instead of storing an empty one.
+void
+SdpMediaSection::SetRtcpFbs(const SdpRtcpFbAttributeList& rtcpfbs)
+{
+ if (rtcpfbs.mFeedbacks.empty()) {
+ GetAttributeList().RemoveAttribute(SdpAttribute::kRtcpFbAttribute);
+ return;
+ }
+
+ GetAttributeList().SetAttribute(new SdpRtcpFbAttributeList(rtcpfbs));
+}
+
+// Replaces this m-section's a=ssrc attributes with one "cname:" entry per
+// ssrc in |ssrcs|. An empty ssrc list removes the attribute entirely.
+void
+SdpMediaSection::SetSsrcs(const std::vector<uint32_t>& ssrcs,
+ const std::string& cname)
+{
+ if (ssrcs.empty()) {
+ GetAttributeList().RemoveAttribute(SdpAttribute::kSsrcAttribute);
+ return;
+ }
+
+ UniquePtr<SdpSsrcAttributeList> ssrcAttr(new SdpSsrcAttributeList);
+ for (auto ssrc : ssrcs) {
+ // When using ssrc attributes, we are required to at least have a cname.
+ // (See https://tools.ietf.org/html/rfc5576#section-6.1)
+ std::string cnameAttr("cname:");
+ cnameAttr += cname;
+ ssrcAttr->PushEntry(ssrc, cnameAttr);
+ }
+
+ GetAttributeList().SetAttribute(ssrcAttr.release());
+}
+
+// Appends an a=msid entry (stream |id| plus track |appdata|) to this
+// m-section, preserving any msids already present.
+void
+SdpMediaSection::AddMsid(const std::string& id, const std::string& appdata)
+{
+ UniquePtr<SdpMsidAttributeList> msids(new SdpMsidAttributeList);
+ if (GetAttributeList().HasAttribute(SdpAttribute::kMsidAttribute)) {
+ msids->mMsids = GetAttributeList().GetMsid().mMsids;
+ }
+ msids->PushEntry(id, appdata);
+ GetAttributeList().SetAttribute(msids.release());
+}
+
+// Returns the a=rid entry with the given |id|, or nullptr when absent.
+// Returned pointer is owned by the attribute list.
+const SdpRidAttributeList::Rid*
+SdpMediaSection::FindRid(const std::string& id) const
+{
+ if (!GetAttributeList().HasAttribute(SdpAttribute::kRidAttribute)) {
+ return nullptr;
+ }
+
+ for (const auto& rid : GetAttributeList().GetRid().mRids) {
+ if (rid.id == id) {
+ return &rid;
+ }
+ }
+
+ return nullptr;
+}
+
+} // namespace mozilla
+
diff --git a/media/webrtc/signaling/src/sdp/SdpMediaSection.h b/media/webrtc/signaling/src/sdp/SdpMediaSection.h
new file mode 100644
index 000000000..16242ab16
--- /dev/null
+++ b/media/webrtc/signaling/src/sdp/SdpMediaSection.h
@@ -0,0 +1,361 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _SDPMEDIASECTION_H_
+#define _SDPMEDIASECTION_H_
+
+#include "mozilla/Maybe.h"
+#include "signaling/src/sdp/SdpEnum.h"
+#include "signaling/src/sdp/SdpAttributeList.h"
+#include <string>
+#include <vector>
+#include <sstream>
+
+namespace mozilla
+{
+
+class SdpAttributeList;
+
+class SdpConnection;
+
+// Abstract interface for one m-section of a parsed SDP, plus concrete
+// convenience helpers (fmtp/rtpmap/rtcp-fb/msid accessors, direction logic)
+// implemented on top of the virtual accessors.
+class SdpMediaSection
+{
+public:
+ enum MediaType { kAudio, kVideo, kText, kApplication, kMessage };
+ // don't add to enum to avoid warnings about unhandled enum values
+ static const size_t kMediaTypes = static_cast<size_t>(kMessage) + 1;
+
+ // Transport protocol tokens from the m= line, per the IANA SDP registry.
+ enum Protocol {
+ kRtpAvp, // RTP/AVP [RFC4566]
+ kUdp, // udp [RFC4566]
+ kVat, // vat [historic]
+ kRtp, // rtp [historic]
+ kUdptl, // udptl [ITU-T]
+ kTcp, // TCP [RFC4145]
+ kRtpAvpf, // RTP/AVPF [RFC4585]
+ kTcpRtpAvp, // TCP/RTP/AVP [RFC4571]
+ kRtpSavp, // RTP/SAVP [RFC3711]
+ kTcpBfcp, // TCP/BFCP [RFC4583]
+ kTcpTlsBfcp, // TCP/TLS/BFCP [RFC4583]
+ kTcpTls, // TCP/TLS [RFC4572]
+ kFluteUdp, // FLUTE/UDP [RFC-mehta-rmt-flute-sdp-05]
+ kTcpMsrp, // TCP/MSRP [RFC4975]
+ kTcpTlsMsrp, // TCP/TLS/MSRP [RFC4975]
+ kDccp, // DCCP [RFC5762]
+ kDccpRtpAvp, // DCCP/RTP/AVP [RFC5762]
+ kDccpRtpSavp, // DCCP/RTP/SAVP [RFC5762]
+ kDccpRtpAvpf, // DCCP/RTP/AVPF [RFC5762]
+ kDccpRtpSavpf, // DCCP/RTP/SAVPF [RFC5762]
+ kRtpSavpf, // RTP/SAVPF [RFC5124]
+ kUdpTlsRtpSavp, // UDP/TLS/RTP/SAVP [RFC5764]
+ kTcpTlsRtpSavp, // TCP/TLS/RTP/SAVP [JSEP-TBD]
+ kDccpTlsRtpSavp, // DCCP/TLS/RTP/SAVP [RFC5764]
+ kUdpTlsRtpSavpf, // UDP/TLS/RTP/SAVPF [RFC5764]
+ kTcpTlsRtpSavpf, // TCP/TLS/RTP/SAVPF [JSEP-TBD]
+ kDccpTlsRtpSavpf, // DCCP/TLS/RTP/SAVPF [RFC5764]
+ kUdpMbmsFecRtpAvp, // UDP/MBMS-FEC/RTP/AVP [RFC6064]
+ kUdpMbmsFecRtpSavp, // UDP/MBMS-FEC/RTP/SAVP [RFC6064]
+ kUdpMbmsRepair, // UDP/MBMS-REPAIR [RFC6064]
+ kFecUdp, // FEC/UDP [RFC6364]
+ kUdpFec, // UDP/FEC [RFC6364]
+ kTcpMrcpv2, // TCP/MRCPv2 [RFC6787]
+ kTcpTlsMrcpv2, // TCP/TLS/MRCPv2 [RFC6787]
+ kPstn, // PSTN [RFC7195]
+ kUdpTlsUdptl, // UDP/TLS/UDPTL [RFC7345]
+ kSctp, // SCTP [draft-ietf-mmusic-sctp-sdp-07]
+ kSctpDtls, // SCTP/DTLS [draft-ietf-mmusic-sctp-sdp-07]
+ kDtlsSctp // DTLS/SCTP [draft-ietf-mmusic-sctp-sdp-07]
+ };
+
+ // |level| is this m-section's zero-based index within the SDP.
+ explicit SdpMediaSection(size_t level) : mLevel(level) {}
+
+ virtual MediaType GetMediaType() const = 0;
+ virtual unsigned int GetPort() const = 0;
+ virtual void SetPort(unsigned int port) = 0;
+ virtual unsigned int GetPortCount() const = 0;
+ virtual Protocol GetProtocol() const = 0;
+ virtual const SdpConnection& GetConnection() const = 0;
+ virtual SdpConnection& GetConnection() = 0;
+ virtual uint32_t GetBandwidth(const std::string& type) const = 0;
+ virtual const std::vector<std::string>& GetFormats() const = 0;
+
+ std::vector<std::string> GetFormatsForSimulcastVersion(
+ size_t simulcastVersion, bool send, bool recv) const;
+ virtual const SdpAttributeList& GetAttributeList() const = 0;
+ virtual SdpAttributeList& GetAttributeList() = 0;
+
+ virtual SdpDirectionAttribute GetDirectionAttribute() const = 0;
+
+ virtual void Serialize(std::ostream&) const = 0;
+
+ virtual void AddCodec(const std::string& pt, const std::string& name,
+ uint32_t clockrate, uint16_t channels) = 0;
+ virtual void ClearCodecs() = 0;
+
+ virtual void AddDataChannel(const std::string& pt, const std::string& name,
+ uint16_t streams) = 0;
+
+ size_t
+ GetLevel() const
+ {
+ return mLevel;
+ }
+
+ inline bool
+ IsReceiving() const
+ {
+ return GetDirectionAttribute().mValue & sdp::kRecv;
+ }
+
+ inline bool
+ IsSending() const
+ {
+ return GetDirectionAttribute().mValue & sdp::kSend;
+ }
+
+ // Adjusts only the receive half of the direction attribute, preserving
+ // whether we are currently sending.
+ inline void
+ SetReceiving(bool receiving)
+ {
+ auto direction = GetDirectionAttribute().mValue;
+ if (direction & sdp::kSend) {
+ SetDirection(receiving ?
+ SdpDirectionAttribute::kSendrecv :
+ SdpDirectionAttribute::kSendonly);
+ } else {
+ SetDirection(receiving ?
+ SdpDirectionAttribute::kRecvonly :
+ SdpDirectionAttribute::kInactive);
+ }
+ }
+
+ // Adjusts only the send half of the direction attribute, preserving
+ // whether we are currently receiving.
+ inline void
+ SetSending(bool sending)
+ {
+ auto direction = GetDirectionAttribute().mValue;
+ if (direction & sdp::kRecv) {
+ SetDirection(sending ?
+ SdpDirectionAttribute::kSendrecv :
+ SdpDirectionAttribute::kRecvonly);
+ } else {
+ SetDirection(sending ?
+ SdpDirectionAttribute::kSendonly :
+ SdpDirectionAttribute::kInactive);
+ }
+ }
+
+ inline void SetDirection(SdpDirectionAttribute::Direction direction)
+ {
+ GetAttributeList().SetAttribute(new SdpDirectionAttribute(direction));
+ }
+
+ const SdpFmtpAttributeList::Parameters* FindFmtp(const std::string& pt) const;
+ void SetFmtp(const SdpFmtpAttributeList::Fmtp& fmtp);
+ void RemoveFmtp(const std::string& pt);
+ const SdpRtpmapAttributeList::Rtpmap* FindRtpmap(const std::string& pt) const;
+ const SdpSctpmapAttributeList::Sctpmap* FindSctpmap(
+ const std::string& pt) const;
+ bool HasRtcpFb(const std::string& pt,
+ SdpRtcpFbAttributeList::Type type,
+ const std::string& subType) const;
+ SdpRtcpFbAttributeList GetRtcpFbs() const;
+ void SetRtcpFbs(const SdpRtcpFbAttributeList& rtcpfbs);
+ // NOTE(review): std::find is used here but <algorithm> is not included by
+ // this header — presumably pulled in transitively; confirm and include it.
+ bool HasFormat(const std::string& format) const
+ {
+ return std::find(GetFormats().begin(), GetFormats().end(), format) !=
+ GetFormats().end();
+ }
+ void SetSsrcs(const std::vector<uint32_t>& ssrcs,
+ const std::string& cname);
+ void AddMsid(const std::string& id, const std::string& appdata);
+ const SdpRidAttributeList::Rid* FindRid(const std::string& id) const;
+
+private:
+ size_t mLevel;
+};
+
+// Streams the m-section in its serialized SDP form.
+inline std::ostream& operator<<(std::ostream& os, const SdpMediaSection& ms)
+{
+ ms.Serialize(os);
+ return os;
+}
+
+// Streams the media-type token exactly as it appears on the m= line;
+// "?" is emitted (with a debug assert) for out-of-range values.
+inline std::ostream& operator<<(std::ostream& os, SdpMediaSection::MediaType t)
+{
+ switch (t) {
+ case SdpMediaSection::kAudio:
+ return os << "audio";
+ case SdpMediaSection::kVideo:
+ return os << "video";
+ case SdpMediaSection::kText:
+ return os << "text";
+ case SdpMediaSection::kApplication:
+ return os << "application";
+ case SdpMediaSection::kMessage:
+ return os << "message";
+ }
+ MOZ_ASSERT(false, "Unknown MediaType");
+ return os << "?";
+}
+
+// Streams the protocol token exactly as it appears on the m= line (matching
+// the IANA registry spellings); "?" is emitted (with a debug assert) for
+// out-of-range values.
+inline std::ostream& operator<<(std::ostream& os, SdpMediaSection::Protocol p)
+{
+ switch (p) {
+ case SdpMediaSection::kRtpAvp:
+ return os << "RTP/AVP";
+ case SdpMediaSection::kUdp:
+ return os << "udp";
+ case SdpMediaSection::kVat:
+ return os << "vat";
+ case SdpMediaSection::kRtp:
+ return os << "rtp";
+ case SdpMediaSection::kUdptl:
+ return os << "udptl";
+ case SdpMediaSection::kTcp:
+ return os << "TCP";
+ case SdpMediaSection::kRtpAvpf:
+ return os << "RTP/AVPF";
+ case SdpMediaSection::kTcpRtpAvp:
+ return os << "TCP/RTP/AVP";
+ case SdpMediaSection::kRtpSavp:
+ return os << "RTP/SAVP";
+ case SdpMediaSection::kTcpBfcp:
+ return os << "TCP/BFCP";
+ case SdpMediaSection::kTcpTlsBfcp:
+ return os << "TCP/TLS/BFCP";
+ case SdpMediaSection::kTcpTls:
+ return os << "TCP/TLS";
+ case SdpMediaSection::kFluteUdp:
+ return os << "FLUTE/UDP";
+ case SdpMediaSection::kTcpMsrp:
+ return os << "TCP/MSRP";
+ case SdpMediaSection::kTcpTlsMsrp:
+ return os << "TCP/TLS/MSRP";
+ case SdpMediaSection::kDccp:
+ return os << "DCCP";
+ case SdpMediaSection::kDccpRtpAvp:
+ return os << "DCCP/RTP/AVP";
+ case SdpMediaSection::kDccpRtpSavp:
+ return os << "DCCP/RTP/SAVP";
+ case SdpMediaSection::kDccpRtpAvpf:
+ return os << "DCCP/RTP/AVPF";
+ case SdpMediaSection::kDccpRtpSavpf:
+ return os << "DCCP/RTP/SAVPF";
+ case SdpMediaSection::kRtpSavpf:
+ return os << "RTP/SAVPF";
+ case SdpMediaSection::kUdpTlsRtpSavp:
+ return os << "UDP/TLS/RTP/SAVP";
+ case SdpMediaSection::kTcpTlsRtpSavp:
+ return os << "TCP/TLS/RTP/SAVP";
+ case SdpMediaSection::kDccpTlsRtpSavp:
+ return os << "DCCP/TLS/RTP/SAVP";
+ case SdpMediaSection::kUdpTlsRtpSavpf:
+ return os << "UDP/TLS/RTP/SAVPF";
+ case SdpMediaSection::kTcpTlsRtpSavpf:
+ return os << "TCP/TLS/RTP/SAVPF";
+ case SdpMediaSection::kDccpTlsRtpSavpf:
+ return os << "DCCP/TLS/RTP/SAVPF";
+ case SdpMediaSection::kUdpMbmsFecRtpAvp:
+ return os << "UDP/MBMS-FEC/RTP/AVP";
+ case SdpMediaSection::kUdpMbmsFecRtpSavp:
+ return os << "UDP/MBMS-FEC/RTP/SAVP";
+ case SdpMediaSection::kUdpMbmsRepair:
+ return os << "UDP/MBMS-REPAIR";
+ case SdpMediaSection::kFecUdp:
+ return os << "FEC/UDP";
+ case SdpMediaSection::kUdpFec:
+ return os << "UDP/FEC";
+ case SdpMediaSection::kTcpMrcpv2:
+ return os << "TCP/MRCPv2";
+ case SdpMediaSection::kTcpTlsMrcpv2:
+ return os << "TCP/TLS/MRCPv2";
+ case SdpMediaSection::kPstn:
+ return os << "PSTN";
+ case SdpMediaSection::kUdpTlsUdptl:
+ return os << "UDP/TLS/UDPTL";
+ case SdpMediaSection::kSctp:
+ return os << "SCTP";
+ case SdpMediaSection::kSctpDtls:
+ return os << "SCTP/DTLS";
+ case SdpMediaSection::kDtlsSctp:
+ return os << "DTLS/SCTP";
+ }
+ MOZ_ASSERT(false, "Unknown Protocol");
+ return os << "?";
+}
+
+// Value type representing an SDP c= (connection) line: address type,
+// address, and the optional multicast TTL / address-count suffixes.
+class SdpConnection
+{
+public:
+ // |ttl| and |count| default to 0, which Serialize treats as "unset"
+ // (the corresponding "/..." suffix is omitted).
+ SdpConnection(sdp::AddrType addrType, std::string addr, uint8_t ttl = 0,
+ uint32_t count = 0)
+ : mAddrType(addrType), mAddr(addr), mTtl(ttl), mCount(count)
+ {
+ }
+ ~SdpConnection() {}
+
+ sdp::AddrType
+ GetAddrType() const
+ {
+ return mAddrType;
+ }
+ const std::string&
+ GetAddress() const
+ {
+ return mAddr;
+ }
+ // Also re-derives the address type: any address containing ':' is
+ // classified as IPv6, everything else as IPv4.
+ void
+ SetAddress(const std::string& address)
+ {
+ mAddr = address;
+ if (mAddr.find(':') != std::string::npos) {
+ mAddrType = sdp::kIPv6;
+ } else {
+ mAddrType = sdp::kIPv4;
+ }
+ }
+ uint8_t
+ GetTtl() const
+ {
+ return mTtl;
+ }
+ uint32_t
+ GetCount() const
+ {
+ return mCount;
+ }
+
+ // Emits the full "c=IN <addrtype> <addr>[/ttl[/count]]\r\n" line.
+ void
+ Serialize(std::ostream& os) const
+ {
+ sdp::NetType netType = sdp::kInternet;
+
+ os << "c=" << netType << " " << mAddrType << " " << mAddr;
+
+ // count is only meaningful (and only emitted) when ttl is present.
+ if (mTtl) {
+ os << "/" << static_cast<uint32_t>(mTtl);
+ if (mCount) {
+ os << "/" << mCount;
+ }
+ }
+ os << "\r\n";
+ }
+
+private:
+ sdp::AddrType mAddrType;
+ std::string mAddr;
+ uint8_t mTtl; // 0-255; 0 when unset
+ uint32_t mCount; // 0 when unset
+};
+
+// Streams the connection in its serialized "c=..." form.
+inline std::ostream& operator<<(std::ostream& os, const SdpConnection& c)
+{
+ c.Serialize(os);
+ return os;
+}
+
+} // namespace mozilla
+
+#endif
diff --git a/media/webrtc/signaling/src/sdp/SipccSdp.cpp b/media/webrtc/signaling/src/sdp/SipccSdp.cpp
new file mode 100644
index 000000000..c23fcf7e9
--- /dev/null
+++ b/media/webrtc/signaling/src/sdp/SipccSdp.cpp
@@ -0,0 +1,180 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "signaling/src/sdp/SipccSdp.h"
+
+#include <cstdlib>
+#include "mozilla/UniquePtr.h"
+#include "mozilla/Assertions.h"
+#include "signaling/src/sdp/SdpErrorHolder.h"
+
+#ifdef CRLF
+#undef CRLF
+#endif
+#define CRLF "\r\n"
+
+namespace mozilla
+{
+
+// Returns the parsed/constructed o= (origin) line data.
+const SdpOrigin&
+SipccSdp::GetOrigin() const
+{
+  return mOrigin;
+}
+
+// Returns the session-level b= value for |type| (e.g. "AS", "TIAS"),
+// or 0 when no bandwidth line of that type was present.
+uint32_t
+SipccSdp::GetBandwidth(const std::string& type) const
+{
+  auto found = mBandwidths.find(type);
+  if (found == mBandwidths.end()) {
+    return 0;
+  }
+  return found->second;
+}
+
+// Returns the media section at zero-based index |level|; deliberately
+// crashes on out-of-range access instead of reading past the vector.
+const SdpMediaSection&
+SipccSdp::GetMediaSection(size_t level) const
+{
+  // Use >= : the original '>' check let level == size() through, which
+  // would index one past the end of mMediaSections.values.
+  if (level >= mMediaSections.values.size()) {
+    MOZ_CRASH();
+  }
+  return *mMediaSections.values[level];
+}
+
+// Non-const overload; same deliberate-crash bounds policy as the const one.
+SdpMediaSection&
+SipccSdp::GetMediaSection(size_t level)
+{
+  // Use >= : the original '>' check let level == size() through, which
+  // would index one past the end of mMediaSections.values.
+  if (level >= mMediaSections.values.size()) {
+    MOZ_CRASH();
+  }
+  return *mMediaSections.values[level];
+}
+
+// Appends a new media section at the next level index and returns it.
+// The direction is stored as an attribute on the new section; port count
+// is left unset (0) and the c= line is built from addrType/addr.
+SdpMediaSection&
+SipccSdp::AddMediaSection(SdpMediaSection::MediaType mediaType,
+                          SdpDirectionAttribute::Direction dir, uint16_t port,
+                          SdpMediaSection::Protocol protocol,
+                          sdp::AddrType addrType, const std::string& addr)
+{
+  size_t level = mMediaSections.values.size();
+  SipccSdpMediaSection* media =
+      new SipccSdpMediaSection(level, &mAttributeList);
+  media->mMediaType = mediaType;
+  media->mPort = port;
+  media->mPortCount = 0;
+  media->mProtocol = protocol;
+  media->mConnection = MakeUnique<SdpConnection>(addrType, addr);
+  media->GetAttributeList().SetAttribute(new SdpDirectionAttribute(dir));
+  // mMediaSections is a PtrVector and takes ownership of |media|.
+  mMediaSections.values.push_back(media);
+  return *media;
+}
+
+// Populates mOrigin from the parsed o= line. Only the IN (internet) network
+// type and IP4/IP6 address types are supported; anything else is a parse
+// error reported against line 2 (the o= line follows v= in a valid SDP).
+bool
+SipccSdp::LoadOrigin(sdp_t* sdp, SdpErrorHolder& errorHolder)
+{
+  std::string username = sdp_get_owner_username(sdp);
+  uint64_t sessId = strtoull(sdp_get_owner_sessionid(sdp), nullptr, 10);
+  uint64_t sessVer = strtoull(sdp_get_owner_version(sdp), nullptr, 10);
+
+  sdp_nettype_e type = sdp_get_owner_network_type(sdp);
+  if (type != SDP_NT_INTERNET) {
+    errorHolder.AddParseError(2, "Unsupported network type");
+    return false;
+  }
+
+  sdp::AddrType addrType;
+  switch (sdp_get_owner_address_type(sdp)) {
+    case SDP_AT_IP4:
+      addrType = sdp::kIPv4;
+      break;
+    case SDP_AT_IP6:
+      addrType = sdp::kIPv6;
+      break;
+    default:
+      errorHolder.AddParseError(2, "Unsupported address type");
+      return false;
+  }
+
+  std::string address = sdp_get_owner_address(sdp);
+  mOrigin = SdpOrigin(username, sessId, sessVer, addrType, address);
+  return true;
+}
+
+// Builds this object from a sipcc parse tree: session-level attributes,
+// origin, session-level bandwidth, then each media section. Returns false
+// (with errors recorded in |errorHolder|) on the first failure.
+bool
+SipccSdp::Load(sdp_t* sdp, SdpErrorHolder& errorHolder)
+{
+  // Believe it or not, SDP_SESSION_LEVEL is 0xFFFF
+  if (!mAttributeList.Load(sdp, SDP_SESSION_LEVEL, errorHolder)) {
+    return false;
+  }
+
+  if (!LoadOrigin(sdp, errorHolder)) {
+    return false;
+  }
+
+  if (!mBandwidths.Load(sdp, SDP_SESSION_LEVEL, errorHolder)) {
+    return false;
+  }
+
+  for (int i = 0; i < sdp_get_num_media_lines(sdp); ++i) {
+    // note that we pass a "level" here that is one higher
+    // sipcc counts media sections from 1, using 0xFFFF as the "session"
+    UniquePtr<SipccSdpMediaSection> section(
+        new SipccSdpMediaSection(i, &mAttributeList));
+    if (!section->Load(sdp, i + 1, errorHolder)) {
+      return false;
+    }
+    // PtrVector takes ownership of the released pointer.
+    mMediaSections.values.push_back(section.release());
+  }
+  return true;
+}
+
+// Writes the whole session description: v=, o=, s=, b=, t=, session-level
+// attributes, then each media section in order.
+void
+SipccSdp::Serialize(std::ostream& os) const
+{
+  os << "v=0" << CRLF << mOrigin << "s=-" << CRLF;
+
+  // We don't support creating i=, u=, e=, p=
+  // We don't generate c= at the session level (only in media)
+
+  mBandwidths.Serialize(os);
+  // Fixed t= line: unbounded session time.
+  os << "t=0 0" << CRLF;
+
+  // We don't support r= or z=
+
+  // attributes
+  os << mAttributeList;
+
+  // media sections
+  for (const SdpMediaSection* msection : mMediaSections.values) {
+    os << *msection;
+  }
+}
+
+// Collects all b= lines at |level| into this map (modifier name -> value).
+// Unsupported modifiers are silently skipped; never fails.
+bool
+SipccSdpBandwidths::Load(sdp_t* sdp, uint16_t level,
+                         SdpErrorHolder& errorHolder)
+{
+  size_t count = sdp_get_num_bw_lines(sdp, level);
+  // sipcc indexes bandwidth lines from 1.
+  for (size_t i = 1; i <= count; ++i) {
+    sdp_bw_modifier_e bwtype = sdp_get_bw_modifier(sdp, level, i);
+    uint32_t bandwidth = sdp_get_bw_value(sdp, level, i);
+    if (bwtype != SDP_BW_MODIFIER_UNSUPPORTED) {
+      const char* typeName = sdp_get_bw_modifier_name(bwtype);
+      (*this)[typeName] = bandwidth;
+    }
+  }
+
+  return true;
+}
+
+// Writes one b= line per stored modifier.
+void
+SipccSdpBandwidths::Serialize(std::ostream& os) const
+{
+  for (auto i = begin(); i != end(); ++i) {
+    os << "b=" << i->first << ":" << i->second << CRLF;
+  }
+}
+
+} // namespace mozilla
diff --git a/media/webrtc/signaling/src/sdp/SipccSdp.h b/media/webrtc/signaling/src/sdp/SipccSdp.h
new file mode 100644
index 000000000..04ef202ab
--- /dev/null
+++ b/media/webrtc/signaling/src/sdp/SipccSdp.h
@@ -0,0 +1,88 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _SIPCCSDP_H_
+#define _SIPCCSDP_H_
+
+#include <map>
+#include <vector>
+#include "mozilla/Attributes.h"
+
+#include "signaling/src/sdp/Sdp.h"
+#include "signaling/src/sdp/SipccSdpMediaSection.h"
+#include "signaling/src/sdp/SipccSdpAttributeList.h"
+extern "C" {
+#include "signaling/src/sdp/sipcc/sdp.h"
+}
+
+#include "signaling/src/common/PtrVector.h"
+
+namespace mozilla
+{
+
+class SipccSdpParser;
+class SdpErrorHolder;
+
+// Sdp implementation backed by the sipcc SDP parser. Instances are either
+// built empty (by SipccSdpParser, via the private ctor + Load) or created
+// with an explicit origin for locally-generated descriptions.
+class SipccSdp final : public Sdp
+{
+  friend class SipccSdpParser;
+
+public:
+  explicit SipccSdp(const SdpOrigin& origin)
+      : mOrigin(origin), mAttributeList(nullptr)
+  {
+  }
+
+  virtual const SdpOrigin& GetOrigin() const override;
+
+  // Note: connection information is always retrieved from media sections
+  virtual uint32_t GetBandwidth(const std::string& type) const override;
+
+  virtual size_t
+  GetMediaSectionCount() const override
+  {
+    return mMediaSections.values.size();
+  }
+
+  virtual const SdpAttributeList&
+  GetAttributeList() const override
+  {
+    return mAttributeList;
+  }
+
+  virtual SdpAttributeList&
+  GetAttributeList() override
+  {
+    return mAttributeList;
+  }
+
+  virtual const SdpMediaSection& GetMediaSection(size_t level) const
+      override;
+
+  virtual SdpMediaSection& GetMediaSection(size_t level) override;
+
+  virtual SdpMediaSection& AddMediaSection(
+      SdpMediaSection::MediaType media, SdpDirectionAttribute::Direction dir,
+      uint16_t port, SdpMediaSection::Protocol proto, sdp::AddrType addrType,
+      const std::string& addr) override;
+
+  virtual void Serialize(std::ostream&) const override;
+
+private:
+  // Placeholder origin; only used by SipccSdpParser before Load() runs.
+  SipccSdp() : mOrigin("", 0, 0, sdp::kIPv4, ""), mAttributeList(nullptr) {}
+
+  bool Load(sdp_t* sdp, SdpErrorHolder& errorHolder);
+  bool LoadOrigin(sdp_t* sdp, SdpErrorHolder& errorHolder);
+
+  SdpOrigin mOrigin;
+  SipccSdpBandwidths mBandwidths;
+  SipccSdpAttributeList mAttributeList;
+  // Owns the media sections (deleted in PtrVector's destructor).
+  PtrVector<SipccSdpMediaSection> mMediaSections;
+};
+
+} // namespace mozilla
+
+#endif // _SIPCCSDP_H_
diff --git a/media/webrtc/signaling/src/sdp/SipccSdpAttributeList.cpp b/media/webrtc/signaling/src/sdp/SipccSdpAttributeList.cpp
new file mode 100644
index 000000000..5357f4728
--- /dev/null
+++ b/media/webrtc/signaling/src/sdp/SipccSdpAttributeList.cpp
@@ -0,0 +1,1413 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "signaling/src/sdp/SipccSdpAttributeList.h"
+
+#include <ostream>
+#include "mozilla/Assertions.h"
+#include "signaling/src/sdp/SdpErrorHolder.h"
+
+extern "C" {
+#include "signaling/src/sdp/sipcc/sdp_private.h"
+}
+
+namespace mozilla
+{
+
+// Shared empty string returned by reference when a string getter has no value.
+/* static */ const std::string SipccSdpAttributeList::kEmptyString = "";
+
+// |sessionLevel| is null for the session-level list itself, and points at the
+// session-level list for media-level lists (used for attribute fallback).
+SipccSdpAttributeList::SipccSdpAttributeList(
+    const SipccSdpAttributeList* sessionLevel)
+    : mSessionLevel(sessionLevel)
+{
+  // mAttributes is a plain array of owning raw pointers; zero it so every
+  // slot starts out null ("attribute not set").
+  memset(&mAttributes, 0, sizeof(mAttributes));
+}
+
+// Frees every owned attribute; unset slots are null, so delete is a no-op.
+SipccSdpAttributeList::~SipccSdpAttributeList()
+{
+  for (size_t i = 0; i < kNumAttributeTypes; ++i) {
+    delete mAttributes[i];
+  }
+}
+
+// True when the attribute is set here (or, with |sessionFallback|, at the
+// session level for attributes legal at both levels).
+bool
+SipccSdpAttributeList::HasAttribute(AttributeType type,
+                                    bool sessionFallback) const
+{
+  return !!GetAttribute(type, sessionFallback);
+}
+
+// Returns the stored attribute, or null if unset. For media-level lists,
+// optionally falls back to the session-level list when the attribute type is
+// legal at both levels.
+const SdpAttribute*
+SipccSdpAttributeList::GetAttribute(AttributeType type,
+                                    bool sessionFallback) const
+{
+  const SdpAttribute* value = mAttributes[static_cast<size_t>(type)];
+  // Only do fallback when the attribute can appear at both the media and
+  // session level
+  if (!value && !AtSessionLevel() && sessionFallback &&
+      SdpAttribute::IsAllowedAtSessionLevel(type) &&
+      SdpAttribute::IsAllowedAtMediaLevel(type)) {
+    return mSessionLevel->GetAttribute(type, false);
+  }
+  return value;
+}
+
+// Deletes and clears the slot for |type|; safe to call when already unset.
+void
+SipccSdpAttributeList::RemoveAttribute(AttributeType type)
+{
+  delete mAttributes[static_cast<size_t>(type)];
+  mAttributes[static_cast<size_t>(type)] = nullptr;
+}
+
+// Removes every attribute in the list.
+void
+SipccSdpAttributeList::Clear()
+{
+  for (size_t i = 0; i < kNumAttributeTypes; ++i) {
+    RemoveAttribute(static_cast<AttributeType>(i));
+  }
+}
+
+// Takes ownership of |attr| and replaces any existing attribute of the same
+// type. Attributes not legal at this level are rejected (and leaked only in
+// the assertion-failure path of debug builds; release builds return early).
+void
+SipccSdpAttributeList::SetAttribute(SdpAttribute* attr)
+{
+  if (!IsAllowedHere(attr->GetType())) {
+    MOZ_ASSERT(false, "This type of attribute is not allowed here");
+    return;
+  }
+  RemoveAttribute(attr->GetType());
+  mAttributes[attr->GetType()] = attr;
+}
+
+// Loads one single-string attribute (first instance only) from sipcc into
+// |targetType|; warns instead of storing if the attribute is misplaced.
+void
+SipccSdpAttributeList::LoadSimpleString(sdp_t* sdp, uint16_t level,
+                                        sdp_attr_e attr,
+                                        AttributeType targetType,
+                                        SdpErrorHolder& errorHolder)
+{
+  const char* value = sdp_attr_get_simple_string(sdp, attr, level, 0, 1);
+  if (value) {
+    if (!IsAllowedHere(targetType)) {
+      uint32_t lineNumber = sdp_attr_line_number(sdp, attr, level, 0, 1);
+      WarnAboutMisplacedAttribute(targetType, lineNumber, errorHolder);
+    } else {
+      SetAttribute(new SdpStringAttribute(targetType, std::string(value)));
+    }
+  }
+}
+
+// Loads all the plain-string attributes we support (a=mid, a=label).
+void
+SipccSdpAttributeList::LoadSimpleStrings(sdp_t* sdp, uint16_t level,
+                                         SdpErrorHolder& errorHolder)
+{
+  LoadSimpleString(sdp, level, SDP_ATTR_MID, SdpAttribute::kMidAttribute,
+                   errorHolder);
+  LoadSimpleString(sdp, level, SDP_ATTR_LABEL, SdpAttribute::kLabelAttribute,
+                   errorHolder);
+}
+
+// Loads one single-number attribute (first instance only) from sipcc into
+// |targetType|; warns instead of storing if the attribute is misplaced.
+void
+SipccSdpAttributeList::LoadSimpleNumber(sdp_t* sdp, uint16_t level,
+                                        sdp_attr_e attr,
+                                        AttributeType targetType,
+                                        SdpErrorHolder& errorHolder)
+{
+  if (sdp_attr_valid(sdp, attr, level, 0, 1)) {
+    if (!IsAllowedHere(targetType)) {
+      uint32_t lineNumber = sdp_attr_line_number(sdp, attr, level, 0, 1);
+      WarnAboutMisplacedAttribute(targetType, lineNumber, errorHolder);
+    } else {
+      uint32_t value = sdp_attr_get_simple_u32(sdp, attr, level, 0, 1);
+      SetAttribute(new SdpNumberAttribute(targetType, value));
+    }
+  }
+}
+
+// Loads all the plain-number attributes we support (a=ptime, a=maxptime).
+void
+SipccSdpAttributeList::LoadSimpleNumbers(sdp_t* sdp, uint16_t level,
+                                         SdpErrorHolder& errorHolder)
+{
+  LoadSimpleNumber(sdp, level, SDP_ATTR_PTIME, SdpAttribute::kPtimeAttribute,
+                   errorHolder);
+  LoadSimpleNumber(sdp, level, SDP_ATTR_MAXPTIME,
+                   SdpAttribute::kMaxptimeAttribute, errorHolder);
+}
+
+// Loads valueless flag attributes. ice-lite is session-level only;
+// rtcp-mux, end-of-candidates, and bundle-only are media-level only.
+void
+SipccSdpAttributeList::LoadFlags(sdp_t* sdp, uint16_t level)
+{
+  if (AtSessionLevel()) {
+    if (sdp_attr_valid(sdp, SDP_ATTR_ICE_LITE, level, 0, 1)) {
+      SetAttribute(new SdpFlagAttribute(SdpAttribute::kIceLiteAttribute));
+    }
+  } else { // media-level
+    if (sdp_attr_valid(sdp, SDP_ATTR_RTCP_MUX, level, 0, 1)) {
+      SetAttribute(new SdpFlagAttribute(SdpAttribute::kRtcpMuxAttribute));
+    }
+    if (sdp_attr_valid(sdp, SDP_ATTR_END_OF_CANDIDATES, level, 0, 1)) {
+      SetAttribute(
+          new SdpFlagAttribute(SdpAttribute::kEndOfCandidatesAttribute));
+    }
+    if (sdp_attr_valid(sdp, SDP_ATTR_BUNDLE_ONLY, level, 0, 1)) {
+      SetAttribute(new SdpFlagAttribute(SdpAttribute::kBundleOnlyAttribute));
+    }
+  }
+}
+
+// Maps a sipcc direction enum onto our SdpDirectionAttribute::Direction.
+// Any value outside the four real directions indicates memory corruption,
+// so we crash rather than continue.
+static void
+ConvertDirection(sdp_direction_e sipcc_direction,
+                 SdpDirectionAttribute::Direction* dir_outparam)
+{
+  switch (sipcc_direction) {
+    case SDP_DIRECTION_SENDRECV:
+      *dir_outparam = SdpDirectionAttribute::kSendrecv;
+      return;
+    case SDP_DIRECTION_SENDONLY:
+      *dir_outparam = SdpDirectionAttribute::kSendonly;
+      return;
+    case SDP_DIRECTION_RECVONLY:
+      *dir_outparam = SdpDirectionAttribute::kRecvonly;
+      return;
+    case SDP_DIRECTION_INACTIVE:
+      *dir_outparam = SdpDirectionAttribute::kInactive;
+      return;
+    case SDP_MAX_QOS_DIRECTIONS:
+      // Nothing actually sets this value.
+      // Fall through to MOZ_CRASH below.
+      {
+      }
+  }
+
+  MOZ_CRASH("Invalid direction from sipcc; this is probably corruption");
+}
+
+// Stores the media direction as a direction attribute.
+// NOTE(review): |errorHolder| is currently unused; ConvertDirection crashes
+// on bad input instead of reporting a parse error.
+void
+SipccSdpAttributeList::LoadDirection(sdp_t* sdp, uint16_t level,
+                                     SdpErrorHolder& errorHolder)
+{
+  SdpDirectionAttribute::Direction dir;
+  ConvertDirection(sdp_get_media_direction(sdp, level, 0), &dir);
+  SetAttribute(new SdpDirectionAttribute(dir));
+}
+
+// Loads a=ice-ufrag, a=ice-pwd, and a=ice-options (first instance each).
+// Missing attributes are simply skipped; there is nothing to report.
+void
+SipccSdpAttributeList::LoadIceAttributes(sdp_t* sdp, uint16_t level)
+{
+  char* value;
+  sdp_result_e sdpres =
+      sdp_attr_get_ice_attribute(sdp, level, 0, SDP_ATTR_ICE_UFRAG, 1, &value);
+  if (sdpres == SDP_SUCCESS) {
+    SetAttribute(new SdpStringAttribute(SdpAttribute::kIceUfragAttribute,
+                                        std::string(value)));
+  }
+  sdpres =
+      sdp_attr_get_ice_attribute(sdp, level, 0, SDP_ATTR_ICE_PWD, 1, &value);
+  if (sdpres == SDP_SUCCESS) {
+    SetAttribute(new SdpStringAttribute(SdpAttribute::kIcePwdAttribute,
+                                        std::string(value)));
+  }
+
+  const char* iceOptVal =
+      sdp_attr_get_simple_string(sdp, SDP_ATTR_ICE_OPTIONS, level, 0, 1);
+  if (iceOptVal) {
+    // ice-options is a space-separated token list; Load() splits it.
+    auto* iceOptions =
+        new SdpOptionsAttribute(SdpAttribute::kIceOptionsAttribute);
+    iceOptions->Load(iceOptVal);
+    SetAttribute(iceOptions);
+  }
+}
+
+// Loads every a=fingerprint instance. sipcc hands us the raw value, so the
+// "<hash-func> <fingerprint>" split is done by hand here; a malformed line
+// is a parse error. Returns false only on malformed input.
+bool
+SipccSdpAttributeList::LoadFingerprint(sdp_t* sdp, uint16_t level,
+                                       SdpErrorHolder& errorHolder)
+{
+  char* value;
+  UniquePtr<SdpFingerprintAttributeList> fingerprintAttrs;
+
+  // sipcc instance indices start at 1; stop at the first missing instance.
+  for (uint16_t i = 1; i < UINT16_MAX; ++i) {
+    sdp_result_e result = sdp_attr_get_dtls_fingerprint_attribute(
+        sdp, level, 0, SDP_ATTR_DTLS_FINGERPRINT, i, &value);
+
+    if (result != SDP_SUCCESS) {
+      break;
+    }
+
+    std::string fingerprintAttr(value);
+    uint32_t lineNumber =
+        sdp_attr_line_number(sdp, SDP_ATTR_DTLS_FINGERPRINT, level, 0, i);
+
+    // sipcc does not expose parse code for this
+    size_t start = fingerprintAttr.find_first_not_of(" \t");
+    if (start == std::string::npos) {
+      errorHolder.AddParseError(lineNumber, "Empty fingerprint attribute");
+      return false;
+    }
+
+    size_t end = fingerprintAttr.find_first_of(" \t", start);
+    if (end == std::string::npos) {
+      // One token, no trailing ws
+      errorHolder.AddParseError(lineNumber,
+                                "Only one token in fingerprint attribute");
+      return false;
+    }
+
+    std::string algorithmToken(fingerprintAttr.substr(start, end - start));
+
+    start = fingerprintAttr.find_first_not_of(" \t", end);
+    if (start == std::string::npos) {
+      // One token, trailing ws
+      errorHolder.AddParseError(lineNumber,
+                                "Only one token in fingerprint attribute");
+      return false;
+    }
+
+    std::string fingerprintToken(fingerprintAttr.substr(start));
+
+    std::vector<uint8_t> fingerprint =
+        SdpFingerprintAttributeList::ParseFingerprint(fingerprintToken);
+    if (fingerprint.size() == 0) {
+      errorHolder.AddParseError(lineNumber, "Malformed fingerprint token");
+      return false;
+    }
+
+    if (!fingerprintAttrs) {
+      fingerprintAttrs.reset(new SdpFingerprintAttributeList);
+    }
+
+    // Don't assert on unknown algorithm, just skip
+    fingerprintAttrs->PushEntry(algorithmToken, fingerprint, false);
+  }
+
+  if (fingerprintAttrs) {
+    SetAttribute(fingerprintAttrs.release());
+  }
+
+  return true;
+}
+
+// Collects every a=candidate line verbatim into a multi-string attribute;
+// nothing is stored if no candidates are present.
+void
+SipccSdpAttributeList::LoadCandidate(sdp_t* sdp, uint16_t level)
+{
+  char* value;
+  auto candidates =
+      MakeUnique<SdpMultiStringAttribute>(SdpAttribute::kCandidateAttribute);
+
+  // sipcc instance indices start at 1; stop at the first missing instance.
+  for (uint16_t i = 1; i < UINT16_MAX; ++i) {
+    sdp_result_e result = sdp_attr_get_ice_attribute(
+        sdp, level, 0, SDP_ATTR_ICE_CANDIDATE, i, &value);
+
+    if (result != SDP_SUCCESS) {
+      break;
+    }
+
+    candidates->mValues.push_back(value);
+  }
+
+  if (!candidates->mValues.empty()) {
+    SetAttribute(candidates.release());
+  }
+}
+
+// Loads every a=sctpmap line. Never fails; returns true unconditionally.
+bool
+SipccSdpAttributeList::LoadSctpmap(sdp_t* sdp, uint16_t level,
+                                   SdpErrorHolder& errorHolder)
+{
+  auto sctpmap = MakeUnique<SdpSctpmapAttributeList>();
+  for (uint16_t i = 0; i < UINT16_MAX; ++i) {
+    // sipcc instances are 1-based; stop at the first missing one.
+    sdp_attr_t* attr = sdp_find_attr(sdp, level, 0, SDP_ATTR_SCTPMAP, i + 1);
+
+    if (!attr) {
+      break;
+    }
+
+    // Yeah, this is a little weird, but for now we'll just store this as a
+    // payload type.
+    uint16_t payloadType = attr->attr.sctpmap.port;
+    uint16_t streams = attr->attr.sctpmap.streams;
+    const char* name = attr->attr.sctpmap.protocol;
+
+    std::ostringstream osPayloadType;
+    osPayloadType << payloadType;
+    sctpmap->PushEntry(osPayloadType.str(), name, streams);
+  }
+
+  if (!sctpmap->mSctpmaps.empty()) {
+    SetAttribute(sctpmap.release());
+  }
+
+  return true;
+}
+
+// Maps a sipcc payload-type enum onto our codec enum. Codecs we don't
+// handle (and RTP_NONE, sipcc's "unknown") all become kOtherCodec; a value
+// outside the enum entirely means corruption, so we crash.
+SdpRtpmapAttributeList::CodecType
+SipccSdpAttributeList::GetCodecType(rtp_ptype type)
+{
+  switch (type) {
+    case RTP_PCMU:
+      return SdpRtpmapAttributeList::kPCMU;
+    case RTP_PCMA:
+      return SdpRtpmapAttributeList::kPCMA;
+    case RTP_G722:
+      return SdpRtpmapAttributeList::kG722;
+    case RTP_H264_P0:
+    case RTP_H264_P1:
+      return SdpRtpmapAttributeList::kH264;
+    case RTP_OPUS:
+      return SdpRtpmapAttributeList::kOpus;
+    case RTP_VP8:
+      return SdpRtpmapAttributeList::kVP8;
+    case RTP_VP9:
+      return SdpRtpmapAttributeList::kVP9;
+    case RTP_RED:
+      return SdpRtpmapAttributeList::kRed;
+    case RTP_ULPFEC:
+      return SdpRtpmapAttributeList::kUlpfec;
+    case RTP_TELEPHONE_EVENT:
+      return SdpRtpmapAttributeList::kTelephoneEvent;
+    case RTP_NONE:
+      // Happens when sipcc doesn't know how to translate to the enum
+    case RTP_CELP:
+    case RTP_G726:
+    case RTP_GSM:
+    case RTP_G723:
+    case RTP_DVI4:
+    case RTP_DVI4_II:
+    case RTP_LPC:
+    case RTP_G728:
+    case RTP_G729:
+    case RTP_JPEG:
+    case RTP_NV:
+    case RTP_H261:
+    case RTP_L16:
+    case RTP_H263:
+    case RTP_ILBC:
+    case RTP_I420:
+      return SdpRtpmapAttributeList::kOtherCodec;
+  }
+  MOZ_CRASH("Invalid codec type from sipcc. Probably corruption.");
+}
+
+// Loads every a=rtpmap line at |level|. A payload type in the m= line with
+// no matching rtpmap is reported but skipped (not fatal); only a failure to
+// query sipcc itself returns false.
+bool
+SipccSdpAttributeList::LoadRtpmap(sdp_t* sdp, uint16_t level,
+                                  SdpErrorHolder& errorHolder)
+{
+  auto rtpmap = MakeUnique<SdpRtpmapAttributeList>();
+  uint16_t count;
+  sdp_result_e result =
+      sdp_attr_num_instances(sdp, level, 0, SDP_ATTR_RTPMAP, &count);
+  if (result != SDP_SUCCESS) {
+    MOZ_ASSERT(false, "Unable to get rtpmap size");
+    errorHolder.AddParseError(sdp_get_media_line_number(sdp, level),
+                              "Unable to get rtpmap size");
+    return false;
+  }
+  for (uint16_t i = 0; i < count; ++i) {
+    // sipcc instance indices are 1-based, hence i + 1.
+    uint16_t pt = sdp_attr_get_rtpmap_payload_type(sdp, level, 0, i + 1);
+    const char* ccName = sdp_attr_get_rtpmap_encname(sdp, level, 0, i + 1);
+
+    if (!ccName) {
+      // Probably no rtpmap attribute for a pt in an m-line
+      errorHolder.AddParseError(sdp_get_media_line_number(sdp, level),
+                                "No rtpmap attribute for payload type");
+      continue;
+    }
+
+    std::string name(ccName);
+
+    SdpRtpmapAttributeList::CodecType codec =
+        GetCodecType(sdp_get_known_payload_type(sdp, level, pt));
+
+    uint32_t clock = sdp_attr_get_rtpmap_clockrate(sdp, level, 0, i + 1);
+    uint16_t channels = 0;
+
+    // sipcc gives us a channels value of "1" for video
+    if (sdp_get_media_type(sdp, level) == SDP_MEDIA_AUDIO) {
+      channels = sdp_attr_get_rtpmap_num_chan(sdp, level, 0, i + 1);
+    }
+
+    std::ostringstream osPayloadType;
+    osPayloadType << pt;
+    rtpmap->PushEntry(osPayloadType.str(), codec, name, clock, channels);
+  }
+
+  if (!rtpmap->mRtpmaps.empty()) {
+    SetAttribute(rtpmap.release());
+  }
+
+  return true;
+}
+
+// Loads the a=setup attribute (first instance). Absent or unknown values
+// are ignored; enum values sipcc never produces indicate corruption.
+void
+SipccSdpAttributeList::LoadSetup(sdp_t* sdp, uint16_t level)
+{
+  sdp_setup_type_e setupType;
+  auto sdpres = sdp_attr_get_setup_attribute(sdp, level, 0, 1, &setupType);
+
+  if (sdpres != SDP_SUCCESS) {
+    return;
+  }
+
+  switch (setupType) {
+    case SDP_SETUP_ACTIVE:
+      SetAttribute(new SdpSetupAttribute(SdpSetupAttribute::kActive));
+      return;
+    case SDP_SETUP_PASSIVE:
+      SetAttribute(new SdpSetupAttribute(SdpSetupAttribute::kPassive));
+      return;
+    case SDP_SETUP_ACTPASS:
+      SetAttribute(new SdpSetupAttribute(SdpSetupAttribute::kActpass));
+      return;
+    case SDP_SETUP_HOLDCONN:
+      SetAttribute(new SdpSetupAttribute(SdpSetupAttribute::kHoldconn));
+      return;
+    case SDP_SETUP_UNKNOWN:
+      return;
+    case SDP_SETUP_NOT_FOUND:
+    case SDP_MAX_SETUP:
+      // There is no code that will set these.
+      // Fall through to MOZ_CRASH() below.
+      {
+      }
+  }
+
+  MOZ_CRASH("Invalid setup type from sipcc. This is probably corruption.");
+}
+
+// Collects every a=ssrc line (SSRC id plus its raw attribute string).
+void
+SipccSdpAttributeList::LoadSsrc(sdp_t* sdp, uint16_t level)
+{
+  auto ssrcs = MakeUnique<SdpSsrcAttributeList>();
+
+  // sipcc instance indices start at 1; stop at the first missing instance.
+  for (uint16_t i = 1; i < UINT16_MAX; ++i) {
+    sdp_attr_t* attr = sdp_find_attr(sdp, level, 0, SDP_ATTR_SSRC, i);
+
+    if (!attr) {
+      break;
+    }
+
+    sdp_ssrc_t* ssrc = &(attr->attr.ssrc);
+    ssrcs->PushEntry(ssrc->ssrc, ssrc->attribute);
+  }
+
+  if (!ssrcs->mSsrcs.empty()) {
+    SetAttribute(ssrcs.release());
+  }
+}
+
+// Loads every a=imageattr line. sipcc hands us the raw value, so parsing is
+// delegated to SdpImageattrAttributeList::PushEntry; a parse failure is
+// reported with the offending column and aborts the load.
+bool
+SipccSdpAttributeList::LoadImageattr(sdp_t* sdp,
+                                     uint16_t level,
+                                     SdpErrorHolder& errorHolder)
+{
+  UniquePtr<SdpImageattrAttributeList> imageattrs(
+      new SdpImageattrAttributeList);
+
+  // sipcc instance indices start at 1; stop at the first missing instance.
+  for (uint16_t i = 1; i < UINT16_MAX; ++i) {
+    const char* imageattrRaw = sdp_attr_get_simple_string(sdp,
+                                                          SDP_ATTR_IMAGEATTR,
+                                                          level,
+                                                          0,
+                                                          i);
+    if (!imageattrRaw) {
+      break;
+    }
+
+    std::string error;
+    size_t errorPos;
+    if (!imageattrs->PushEntry(imageattrRaw, &error, &errorPos)) {
+      std::ostringstream fullError;
+      fullError << error << " at column " << errorPos;
+      errorHolder.AddParseError(
+          sdp_attr_line_number(sdp, SDP_ATTR_IMAGEATTR, level, 0, i),
+          fullError.str());
+      return false;
+    }
+  }
+
+  if (!imageattrs->mImageattrs.empty()) {
+    SetAttribute(imageattrs.release());
+  }
+  return true;
+}
+
+// Loads the a=simulcast attribute (first instance only). Absence is not an
+// error; a malformed value is reported with its column and aborts the load.
+bool
+SipccSdpAttributeList::LoadSimulcast(sdp_t* sdp,
+                                     uint16_t level,
+                                     SdpErrorHolder& errorHolder)
+{
+  const char* simulcastRaw = sdp_attr_get_simple_string(sdp,
+                                                        SDP_ATTR_SIMULCAST,
+                                                        level,
+                                                        0,
+                                                        1);
+  if (!simulcastRaw) {
+    return true;
+  }
+
+  UniquePtr<SdpSimulcastAttribute> simulcast(
+      new SdpSimulcastAttribute);
+
+  // sipcc only hands us the raw string; SdpSimulcastAttribute parses it.
+  std::istringstream is(simulcastRaw);
+  std::string error;
+  if (!simulcast->Parse(is, &error)) {
+    std::ostringstream fullError;
+    fullError << error << " at column " << is.tellg();
+    errorHolder.AddParseError(
+        sdp_attr_line_number(sdp, SDP_ATTR_SIMULCAST, level, 0, 1),
+        fullError.str());
+    return false;
+  }
+
+  SetAttribute(simulcast.release());
+  return true;
+}
+
+// Loads every a=group line we understand (FID, LS, ANAT, BUNDLE); groups
+// with unrecognized semantics are skipped. Fails only when sipcc itself
+// cannot be queried or returns a null identification tag.
+bool
+SipccSdpAttributeList::LoadGroups(sdp_t* sdp, uint16_t level,
+                                  SdpErrorHolder& errorHolder)
+{
+  uint16_t attrCount = 0;
+  if (sdp_attr_num_instances(sdp, level, 0, SDP_ATTR_GROUP, &attrCount) !=
+      SDP_SUCCESS) {
+    MOZ_ASSERT(false, "Could not get count of group attributes");
+    errorHolder.AddParseError(0, "Could not get count of group attributes");
+    return false;
+  }
+
+  UniquePtr<SdpGroupAttributeList> groups = MakeUnique<SdpGroupAttributeList>();
+  // sipcc instance indices start at 1.
+  for (uint16_t attr = 1; attr <= attrCount; ++attr) {
+    SdpGroupAttributeList::Semantics semantics;
+    std::vector<std::string> tags;
+
+    switch (sdp_get_group_attr(sdp, level, 0, attr)) {
+      case SDP_GROUP_ATTR_FID:
+        semantics = SdpGroupAttributeList::kFid;
+        break;
+      case SDP_GROUP_ATTR_LS:
+        semantics = SdpGroupAttributeList::kLs;
+        break;
+      case SDP_GROUP_ATTR_ANAT:
+        semantics = SdpGroupAttributeList::kAnat;
+        break;
+      case SDP_GROUP_ATTR_BUNDLE:
+        semantics = SdpGroupAttributeList::kBundle;
+        break;
+      default:
+        continue;
+    }
+
+    uint16_t idCount = sdp_get_group_num_id(sdp, level, 0, attr);
+    for (uint16_t id = 1; id <= idCount; ++id) {
+      const char* idStr = sdp_get_group_id(sdp, level, 0, attr, id);
+      if (!idStr) {
+        std::ostringstream os;
+        os << "bad a=group identifier at " << (attr - 1) << ", " << (id - 1);
+        errorHolder.AddParseError(0, os.str());
+        return false;
+      }
+      tags.push_back(std::string(idStr));
+    }
+    groups->PushEntry(semantics, tags);
+  }
+
+  if (!groups->mGroups.empty()) {
+    SetAttribute(groups.release());
+  }
+
+  return true;
+}
+
+// Loads every a=msid-semantic line (semantic token plus its msid list).
+// Never fails; returns true unconditionally (|errorHolder| is accepted for
+// signature consistency with the other loaders but currently unused).
+bool
+SipccSdpAttributeList::LoadMsidSemantics(sdp_t* sdp, uint16_t level,
+                                         SdpErrorHolder& errorHolder)
+{
+  auto msidSemantics = MakeUnique<SdpMsidSemanticAttributeList>();
+
+  // sipcc instance indices start at 1; stop at the first missing instance.
+  for (uint16_t i = 1; i < UINT16_MAX; ++i) {
+    sdp_attr_t* attr = sdp_find_attr(sdp, level, 0, SDP_ATTR_MSID_SEMANTIC, i);
+
+    if (!attr) {
+      break;
+    }
+
+    sdp_msid_semantic_t* msid_semantic = &(attr->attr.msid_semantic);
+    std::vector<std::string> msids;
+    // Use a distinct index name; the original inner loop reused 'i',
+    // shadowing the outer instance counter.
+    for (size_t j = 0; j < SDP_MAX_MEDIA_STREAMS; ++j) {
+      // The msids array is null-terminated when not full.
+      if (!msid_semantic->msids[j]) {
+        break;
+      }
+
+      msids.push_back(msid_semantic->msids[j]);
+    }
+
+    msidSemantics->PushEntry(msid_semantic->semantic, msids);
+  }
+
+  if (!msidSemantics->mMsidSemantics.empty()) {
+    SetAttribute(msidSemantics.release());
+  }
+  return true;
+}
+
+// Loads the a=identity attribute (first instance) as a raw string.
+void
+SipccSdpAttributeList::LoadIdentity(sdp_t* sdp, uint16_t level)
+{
+  const char* val = sdp_attr_get_long_string(sdp, SDP_ATTR_IDENTITY, level, 0, 1);
+  if (val) {
+    SetAttribute(new SdpStringAttribute(SdpAttribute::kIdentityAttribute,
+                                        std::string(val)));
+  }
+}
+
+// Loads the a=dtls-message attribute (first instance).
+void
+SipccSdpAttributeList::LoadDtlsMessage(sdp_t* sdp, uint16_t level)
+{
+  const char* val = sdp_attr_get_long_string(sdp, SDP_ATTR_DTLS_MESSAGE, level,
+                                             0, 1);
+  if (val) {
+    // sipcc does not expose parse code for this, so SdpDtlsMessageAttribute
+    // parses the raw string in its constructor.
+    std::string strval(val);
+    SetAttribute(new SdpDtlsMessageAttribute(strval));
+  }
+}
+
+// Loads every a=fmtp line. For codecs we understand (H264, VP8/VP9, RED,
+// Opus, telephone-event) the sipcc-parsed fields are copied into a typed
+// Parameters object; for everything else only the payload type is kept.
+void
+SipccSdpAttributeList::LoadFmtp(sdp_t* sdp, uint16_t level)
+{
+  auto fmtps = MakeUnique<SdpFmtpAttributeList>();
+
+  // sipcc instance indices start at 1; stop at the first missing instance.
+  for (uint16_t i = 1; i < UINT16_MAX; ++i) {
+    sdp_attr_t* attr = sdp_find_attr(sdp, level, 0, SDP_ATTR_FMTP, i);
+
+    if (!attr) {
+      break;
+    }
+
+    sdp_fmtp_t* fmtp = &(attr->attr.fmtp);
+
+    // Get the payload type
+    std::stringstream osPayloadType;
+    // payload_num is the number in the fmtp attribute, verbatim
+    osPayloadType << fmtp->payload_num;
+
+    // Get parsed form of parameters, if supported
+    UniquePtr<SdpFmtpAttributeList::Parameters> parameters;
+
+    rtp_ptype codec = sdp_get_known_payload_type(sdp, level, fmtp->payload_num);
+
+    switch (codec) {
+      case RTP_H264_P0:
+      case RTP_H264_P1: {
+        SdpFmtpAttributeList::H264Parameters* h264Parameters(
+            new SdpFmtpAttributeList::H264Parameters);
+
+        sstrncpy(h264Parameters->sprop_parameter_sets, fmtp->parameter_sets,
+                 sizeof(h264Parameters->sprop_parameter_sets));
+
+        h264Parameters->level_asymmetry_allowed =
+            !!(fmtp->level_asymmetry_allowed);
+
+        h264Parameters->packetization_mode = fmtp->packetization_mode;
+        // profile-level-id is a hex token (e.g. "42e01f").
+        sscanf(fmtp->profile_level_id, "%x", &h264Parameters->profile_level_id);
+        h264Parameters->max_mbps = fmtp->max_mbps;
+        h264Parameters->max_fs = fmtp->max_fs;
+        h264Parameters->max_cpb = fmtp->max_cpb;
+        h264Parameters->max_dpb = fmtp->max_dpb;
+        h264Parameters->max_br = fmtp->max_br;
+
+        parameters.reset(h264Parameters);
+      } break;
+      case RTP_VP9: {
+        // VP9 reuses the VP8 parameter struct, tagged with the VP9 codec.
+        SdpFmtpAttributeList::VP8Parameters* vp9Parameters(
+            new SdpFmtpAttributeList::VP8Parameters(
+                SdpRtpmapAttributeList::kVP9));
+
+        vp9Parameters->max_fs = fmtp->max_fs;
+        vp9Parameters->max_fr = fmtp->max_fr;
+
+        parameters.reset(vp9Parameters);
+      } break;
+      case RTP_VP8: {
+        SdpFmtpAttributeList::VP8Parameters* vp8Parameters(
+            new SdpFmtpAttributeList::VP8Parameters(
+                SdpRtpmapAttributeList::kVP8));
+
+        vp8Parameters->max_fs = fmtp->max_fs;
+        vp8Parameters->max_fr = fmtp->max_fr;
+
+        parameters.reset(vp8Parameters);
+      } break;
+      case RTP_RED: {
+        SdpFmtpAttributeList::RedParameters* redParameters(
+            new SdpFmtpAttributeList::RedParameters);
+        // Use a distinct index name; the original loop reused 'i', shadowing
+        // the outer attribute-instance counter. The list is 0-terminated
+        // when not full.
+        for (int k = 0;
+             k < SDP_FMTP_MAX_REDUNDANT_ENCODINGS && fmtp->redundant_encodings[k];
+             ++k) {
+          redParameters->encodings.push_back(fmtp->redundant_encodings[k]);
+        }
+
+        parameters.reset(redParameters);
+      } break;
+      case RTP_OPUS: {
+        SdpFmtpAttributeList::OpusParameters* opusParameters(
+            new SdpFmtpAttributeList::OpusParameters);
+        opusParameters->maxplaybackrate = fmtp->maxplaybackrate;
+        opusParameters->stereo = fmtp->stereo;
+        opusParameters->useInBandFec = fmtp->useinbandfec;
+        parameters.reset(opusParameters);
+      } break;
+      case RTP_TELEPHONE_EVENT: {
+        SdpFmtpAttributeList::TelephoneEventParameters* teParameters(
+            new SdpFmtpAttributeList::TelephoneEventParameters);
+        if (strlen(fmtp->dtmf_tones) > 0) {
+          teParameters->dtmfTones = fmtp->dtmf_tones;
+        }
+        parameters.reset(teParameters);
+      } break;
+      default: {
+      }
+    }
+
+    fmtps->PushEntry(osPayloadType.str(), Move(parameters));
+  }
+
+  if (!fmtps->mFmtps.empty()) {
+    SetAttribute(fmtps.release());
+  }
+}
+
+// Loads every a=msid line (identifier + appdata). Lines with a missing
+// piece are reported as parse errors but skipped rather than fatal.
+void
+SipccSdpAttributeList::LoadMsids(sdp_t* sdp, uint16_t level,
+                                 SdpErrorHolder& errorHolder)
+{
+  uint16_t attrCount = 0;
+  if (sdp_attr_num_instances(sdp, level, 0, SDP_ATTR_MSID, &attrCount) !=
+      SDP_SUCCESS) {
+    MOZ_ASSERT(false, "Unable to get count of msid attributes");
+    errorHolder.AddParseError(0, "Unable to get count of msid attributes");
+    return;
+  }
+  auto msids = MakeUnique<SdpMsidAttributeList>();
+  // sipcc instance indices start at 1.
+  for (uint16_t i = 1; i <= attrCount; ++i) {
+    uint32_t lineNumber = sdp_attr_line_number(sdp, SDP_ATTR_MSID, level, 0, i);
+
+    const char* identifier = sdp_attr_get_msid_identifier(sdp, level, 0, i);
+    if (!identifier) {
+      errorHolder.AddParseError(lineNumber, "msid attribute with bad identity");
+      continue;
+    }
+
+    const char* appdata = sdp_attr_get_msid_appdata(sdp, level, 0, i);
+    if (!appdata) {
+      errorHolder.AddParseError(lineNumber, "msid attribute with bad appdata");
+      continue;
+    }
+
+    msids->PushEntry(identifier, appdata);
+  }
+
+  if (!msids->mMsids.empty()) {
+    SetAttribute(msids.release());
+  }
+}
+
+// Loads every a=rid line. sipcc hands us the raw value, so parsing is
+// delegated to SdpRidAttributeList::PushEntry; a parse failure is reported
+// with the offending column and aborts the load.
+bool
+SipccSdpAttributeList::LoadRid(sdp_t* sdp,
+                               uint16_t level,
+                               SdpErrorHolder& errorHolder)
+{
+  UniquePtr<SdpRidAttributeList> rids(new SdpRidAttributeList);
+
+  // sipcc instance indices start at 1; stop at the first missing instance.
+  for (uint16_t i = 1; i < UINT16_MAX; ++i) {
+    const char* ridRaw = sdp_attr_get_simple_string(sdp,
+                                                    SDP_ATTR_RID,
+                                                    level,
+                                                    0,
+                                                    i);
+    if (!ridRaw) {
+      break;
+    }
+
+    std::string error;
+    size_t errorPos;
+    if (!rids->PushEntry(ridRaw, &error, &errorPos)) {
+      std::ostringstream fullError;
+      fullError << error << " at column " << errorPos;
+      errorHolder.AddParseError(
+          sdp_attr_line_number(sdp, SDP_ATTR_RID, level, 0, i),
+          fullError.str());
+      return false;
+    }
+  }
+
+  if (!rids->mRids.empty()) {
+    SetAttribute(rids.release());
+  }
+  return true;
+}
+
+// Loads every a=extmap line. Having extmap at both the session and media
+// level is reported as a parse error (but the media-level ones are still
+// stored).
+void
+SipccSdpAttributeList::LoadExtmap(sdp_t* sdp, uint16_t level,
+                                  SdpErrorHolder& errorHolder)
+{
+  auto extmaps = MakeUnique<SdpExtmapAttributeList>();
+
+  // sipcc instance indices start at 1; stop at the first missing instance.
+  for (uint16_t i = 1; i < UINT16_MAX; ++i) {
+    sdp_attr_t* attr = sdp_find_attr(sdp, level, 0, SDP_ATTR_EXTMAP, i);
+
+    if (!attr) {
+      break;
+    }
+
+    sdp_extmap_t* extmap = &(attr->attr.extmap);
+
+    // Direction defaults to sendrecv when the extmap carries none.
+    SdpDirectionAttribute::Direction dir = SdpDirectionAttribute::kSendrecv;
+
+    if (extmap->media_direction_specified) {
+      ConvertDirection(extmap->media_direction, &dir);
+    }
+
+    extmaps->PushEntry(extmap->id, dir, extmap->media_direction_specified,
+                       extmap->uri, extmap->extension_attributes);
+  }
+
+  if (!extmaps->mExtmaps.empty()) {
+    if (!AtSessionLevel() &&
+        mSessionLevel->HasAttribute(SdpAttribute::kExtmapAttribute)) {
+      uint32_t lineNumber =
+          sdp_attr_line_number(sdp, SDP_ATTR_EXTMAP, level, 0, 1);
+      errorHolder.AddParseError(
+          lineNumber, "extmap attributes in both session and media level");
+    }
+    SetAttribute(extmaps.release());
+  }
+}
+
+// Loads every a=rtcp-fb line, translating sipcc's feedback-type enums into
+// our type + parameter-string representation. Feedback types or sub-types
+// we don't understand are silently skipped.
+void
+SipccSdpAttributeList::LoadRtcpFb(sdp_t* sdp, uint16_t level,
+                                  SdpErrorHolder& errorHolder)
+{
+  auto rtcpfbs = MakeUnique<SdpRtcpFbAttributeList>();
+
+  // sipcc instance indices start at 1; stop at the first missing instance.
+  for (uint16_t i = 1; i < UINT16_MAX; ++i) {
+    sdp_attr_t* attr = sdp_find_attr(sdp, level, 0, SDP_ATTR_RTCP_FB, i);
+
+    if (!attr) {
+      break;
+    }
+
+    sdp_fmtp_fb_t* rtcpfb = &attr->attr.rtcp_fb;
+
+    SdpRtcpFbAttributeList::Type type;
+    std::string parameter;
+
+    // Set type and parameter
+    switch (rtcpfb->feedback_type) {
+      case SDP_RTCP_FB_ACK:
+        type = SdpRtcpFbAttributeList::kAck;
+        switch (rtcpfb->param.ack) {
+          // TODO: sipcc doesn't seem to support ack with no following token.
+          // Issue 189.
+          case SDP_RTCP_FB_ACK_RPSI:
+            parameter = SdpRtcpFbAttributeList::rpsi;
+            break;
+          case SDP_RTCP_FB_ACK_APP:
+            parameter = SdpRtcpFbAttributeList::app;
+            break;
+          default:
+            // Type we don't care about, ignore.
+            continue;
+        }
+        break;
+      case SDP_RTCP_FB_CCM:
+        type = SdpRtcpFbAttributeList::kCcm;
+        switch (rtcpfb->param.ccm) {
+          case SDP_RTCP_FB_CCM_FIR:
+            parameter = SdpRtcpFbAttributeList::fir;
+            break;
+          case SDP_RTCP_FB_CCM_TMMBR:
+            parameter = SdpRtcpFbAttributeList::tmmbr;
+            break;
+          case SDP_RTCP_FB_CCM_TSTR:
+            parameter = SdpRtcpFbAttributeList::tstr;
+            break;
+          case SDP_RTCP_FB_CCM_VBCM:
+            parameter = SdpRtcpFbAttributeList::vbcm;
+            break;
+          default:
+            // Type we don't care about, ignore.
+            continue;
+        }
+        break;
+      case SDP_RTCP_FB_NACK:
+        type = SdpRtcpFbAttributeList::kNack;
+        switch (rtcpfb->param.nack) {
+          case SDP_RTCP_FB_NACK_BASIC:
+            // Plain "nack" carries no parameter token.
+            break;
+          case SDP_RTCP_FB_NACK_SLI:
+            parameter = SdpRtcpFbAttributeList::sli;
+            break;
+          case SDP_RTCP_FB_NACK_PLI:
+            parameter = SdpRtcpFbAttributeList::pli;
+            break;
+          case SDP_RTCP_FB_NACK_RPSI:
+            parameter = SdpRtcpFbAttributeList::rpsi;
+            break;
+          case SDP_RTCP_FB_NACK_APP:
+            parameter = SdpRtcpFbAttributeList::app;
+            break;
+          default:
+            // Type we don't care about, ignore.
+            continue;
+        }
+        break;
+      case SDP_RTCP_FB_TRR_INT: {
+        type = SdpRtcpFbAttributeList::kTrrInt;
+        // trr-int's parameter is its numeric interval.
+        std::ostringstream os;
+        os << rtcpfb->param.trr_int;
+        parameter = os.str();
+      } break;
+      case SDP_RTCP_FB_REMB: {
+        type = SdpRtcpFbAttributeList::kRemb;
+      } break;
+      default:
+        // Type we don't care about, ignore.
+        continue;
+    }
+
+    std::stringstream osPayloadType;
+    // sipcc uses UINT16_MAX for the wildcard ("*") payload type.
+    if (rtcpfb->payload_num == UINT16_MAX) {
+      osPayloadType << "*";
+    } else {
+      osPayloadType << rtcpfb->payload_num;
+    }
+
+    std::string pt(osPayloadType.str());
+    std::string extra(rtcpfb->extra);
+
+    rtcpfbs->PushEntry(pt, type, parameter, extra);
+  }
+
+  if (!rtcpfbs->mFeedbacks.empty()) {
+    SetAttribute(rtcpfbs.release());
+  }
+}
+
+void
+SipccSdpAttributeList::LoadRtcp(sdp_t* sdp, uint16_t level,
+ SdpErrorHolder& errorHolder)
+{
+ sdp_attr_t* attr = sdp_find_attr(sdp, level, 0, SDP_ATTR_RTCP, 1);
+
+ if (!attr) {
+ return;
+ }
+
+ sdp_rtcp_t* rtcp = &attr->attr.rtcp;
+
+ if (rtcp->nettype != SDP_NT_INTERNET) {
+ return;
+ }
+
+ if (rtcp->addrtype != SDP_AT_IP4 && rtcp->addrtype != SDP_AT_IP6) {
+ return;
+ }
+
+ if (!strlen(rtcp->addr)) {
+ SetAttribute(new SdpRtcpAttribute(rtcp->port));
+ } else {
+ SetAttribute(
+ new SdpRtcpAttribute(
+ rtcp->port,
+ sdp::kInternet,
+ rtcp->addrtype == SDP_AT_IP4 ? sdp::kIPv4 : sdp::kIPv6,
+ rtcp->addr));
+ }
+}
+
+bool
+SipccSdpAttributeList::Load(sdp_t* sdp, uint16_t level,
+ SdpErrorHolder& errorHolder)
+{
+
+ LoadSimpleStrings(sdp, level, errorHolder);
+ LoadSimpleNumbers(sdp, level, errorHolder);
+ LoadFlags(sdp, level);
+ LoadDirection(sdp, level, errorHolder);
+
+ if (AtSessionLevel()) {
+ if (!LoadGroups(sdp, level, errorHolder)) {
+ return false;
+ }
+
+ if (!LoadMsidSemantics(sdp, level, errorHolder)) {
+ return false;
+ }
+
+ LoadIdentity(sdp, level);
+ LoadDtlsMessage(sdp, level);
+ } else {
+ sdp_media_e mtype = sdp_get_media_type(sdp, level);
+ if (mtype == SDP_MEDIA_APPLICATION) {
+ if (!LoadSctpmap(sdp, level, errorHolder)) {
+ return false;
+ }
+ } else {
+ if (!LoadRtpmap(sdp, level, errorHolder)) {
+ return false;
+ }
+ }
+ LoadCandidate(sdp, level);
+ LoadFmtp(sdp, level);
+ LoadMsids(sdp, level, errorHolder);
+ LoadRtcpFb(sdp, level, errorHolder);
+ LoadRtcp(sdp, level, errorHolder);
+ LoadSsrc(sdp, level);
+ if (!LoadImageattr(sdp, level, errorHolder)) {
+ return false;
+ }
+ if (!LoadSimulcast(sdp, level, errorHolder)) {
+ return false;
+ }
+ if (!LoadRid(sdp, level, errorHolder)) {
+ return false;
+ }
+ }
+
+ LoadIceAttributes(sdp, level);
+ if (!LoadFingerprint(sdp, level, errorHolder)) {
+ return false;
+ }
+ LoadSetup(sdp, level);
+ LoadExtmap(sdp, level, errorHolder);
+
+ return true;
+}
+
+bool
+SipccSdpAttributeList::IsAllowedHere(SdpAttribute::AttributeType type) const
+{
+ if (AtSessionLevel() && !SdpAttribute::IsAllowedAtSessionLevel(type)) {
+ return false;
+ }
+
+ if (!AtSessionLevel() && !SdpAttribute::IsAllowedAtMediaLevel(type)) {
+ return false;
+ }
+
+ return true;
+}
+
+void
+SipccSdpAttributeList::WarnAboutMisplacedAttribute(
+ SdpAttribute::AttributeType type, uint32_t lineNumber,
+ SdpErrorHolder& errorHolder)
+{
+ std::string warning = SdpAttribute::GetAttributeTypeString(type) +
+ (AtSessionLevel() ? " at session level. Ignoring."
+ : " at media level. Ignoring.");
+ errorHolder.AddParseError(lineNumber, warning);
+}
+
+const std::vector<std::string>&
+SipccSdpAttributeList::GetCandidate() const
+{
+ if (!HasAttribute(SdpAttribute::kCandidateAttribute)) {
+ MOZ_CRASH();
+ }
+
+ return static_cast<const SdpMultiStringAttribute*>(
+ GetAttribute(SdpAttribute::kCandidateAttribute))->mValues;
+}
+
+const SdpConnectionAttribute&
+SipccSdpAttributeList::GetConnection() const
+{
+ if (!HasAttribute(SdpAttribute::kConnectionAttribute)) {
+ MOZ_CRASH();
+ }
+
+ return *static_cast<const SdpConnectionAttribute*>(
+ GetAttribute(SdpAttribute::kConnectionAttribute));
+}
+
+SdpDirectionAttribute::Direction
+SipccSdpAttributeList::GetDirection() const
+{
+ if (!HasAttribute(SdpAttribute::kDirectionAttribute)) {
+ MOZ_CRASH();
+ }
+
+ const SdpAttribute* attr = GetAttribute(SdpAttribute::kDirectionAttribute);
+ return static_cast<const SdpDirectionAttribute*>(attr)->mValue;
+}
+
+const SdpDtlsMessageAttribute&
+SipccSdpAttributeList::GetDtlsMessage() const
+{
+ if (!HasAttribute(SdpAttribute::kDtlsMessageAttribute)) {
+ MOZ_CRASH();
+ }
+ const SdpAttribute* attr = GetAttribute(SdpAttribute::kDtlsMessageAttribute);
+ return *static_cast<const SdpDtlsMessageAttribute*>(attr);
+}
+
+const SdpExtmapAttributeList&
+SipccSdpAttributeList::GetExtmap() const
+{
+ if (!HasAttribute(SdpAttribute::kExtmapAttribute)) {
+ MOZ_CRASH();
+ }
+
+ return *static_cast<const SdpExtmapAttributeList*>(
+ GetAttribute(SdpAttribute::kExtmapAttribute));
+}
+
+const SdpFingerprintAttributeList&
+SipccSdpAttributeList::GetFingerprint() const
+{
+ if (!HasAttribute(SdpAttribute::kFingerprintAttribute)) {
+ MOZ_CRASH();
+ }
+ const SdpAttribute* attr = GetAttribute(SdpAttribute::kFingerprintAttribute);
+ return *static_cast<const SdpFingerprintAttributeList*>(attr);
+}
+
+const SdpFmtpAttributeList&
+SipccSdpAttributeList::GetFmtp() const
+{
+ if (!HasAttribute(SdpAttribute::kFmtpAttribute)) {
+ MOZ_CRASH();
+ }
+
+ return *static_cast<const SdpFmtpAttributeList*>(
+ GetAttribute(SdpAttribute::kFmtpAttribute));
+}
+
+const SdpGroupAttributeList&
+SipccSdpAttributeList::GetGroup() const
+{
+ if (!HasAttribute(SdpAttribute::kGroupAttribute)) {
+ MOZ_CRASH();
+ }
+
+ return *static_cast<const SdpGroupAttributeList*>(
+ GetAttribute(SdpAttribute::kGroupAttribute));
+}
+
+const SdpOptionsAttribute&
+SipccSdpAttributeList::GetIceOptions() const
+{
+ if (!HasAttribute(SdpAttribute::kIceOptionsAttribute)) {
+ MOZ_CRASH();
+ }
+
+ const SdpAttribute* attr = GetAttribute(SdpAttribute::kIceOptionsAttribute);
+ return *static_cast<const SdpOptionsAttribute*>(attr);
+}
+
+const std::string&
+SipccSdpAttributeList::GetIcePwd() const
+{
+ if (!HasAttribute(SdpAttribute::kIcePwdAttribute)) {
+ return kEmptyString;
+ }
+ const SdpAttribute* attr = GetAttribute(SdpAttribute::kIcePwdAttribute);
+ return static_cast<const SdpStringAttribute*>(attr)->mValue;
+}
+
+const std::string&
+SipccSdpAttributeList::GetIceUfrag() const
+{
+ if (!HasAttribute(SdpAttribute::kIceUfragAttribute)) {
+ return kEmptyString;
+ }
+ const SdpAttribute* attr = GetAttribute(SdpAttribute::kIceUfragAttribute);
+ return static_cast<const SdpStringAttribute*>(attr)->mValue;
+}
+
+const std::string&
+SipccSdpAttributeList::GetIdentity() const
+{
+ if (!HasAttribute(SdpAttribute::kIdentityAttribute)) {
+ return kEmptyString;
+ }
+ const SdpAttribute* attr = GetAttribute(SdpAttribute::kIdentityAttribute);
+ return static_cast<const SdpStringAttribute*>(attr)->mValue;
+}
+
+const SdpImageattrAttributeList&
+SipccSdpAttributeList::GetImageattr() const
+{
+ if (!HasAttribute(SdpAttribute::kImageattrAttribute)) {
+ MOZ_CRASH();
+ }
+ const SdpAttribute* attr = GetAttribute(SdpAttribute::kImageattrAttribute);
+ return *static_cast<const SdpImageattrAttributeList*>(attr);
+}
+
+const SdpSimulcastAttribute&
+SipccSdpAttributeList::GetSimulcast() const
+{
+ if (!HasAttribute(SdpAttribute::kSimulcastAttribute)) {
+ MOZ_CRASH();
+ }
+ const SdpAttribute* attr = GetAttribute(SdpAttribute::kSimulcastAttribute);
+ return *static_cast<const SdpSimulcastAttribute*>(attr);
+}
+
+const std::string&
+SipccSdpAttributeList::GetLabel() const
+{
+ if (!HasAttribute(SdpAttribute::kLabelAttribute)) {
+ return kEmptyString;
+ }
+ const SdpAttribute* attr = GetAttribute(SdpAttribute::kLabelAttribute);
+ return static_cast<const SdpStringAttribute*>(attr)->mValue;
+}
+
+uint32_t
+SipccSdpAttributeList::GetMaxptime() const
+{
+ if (!HasAttribute(SdpAttribute::kMaxptimeAttribute)) {
+ MOZ_CRASH();
+ }
+ const SdpAttribute* attr = GetAttribute(SdpAttribute::kMaxptimeAttribute);
+ return static_cast<const SdpNumberAttribute*>(attr)->mValue;
+}
+
+const std::string&
+SipccSdpAttributeList::GetMid() const
+{
+ if (!HasAttribute(SdpAttribute::kMidAttribute)) {
+ return kEmptyString;
+ }
+ const SdpAttribute* attr = GetAttribute(SdpAttribute::kMidAttribute);
+ return static_cast<const SdpStringAttribute*>(attr)->mValue;
+}
+
+const SdpMsidAttributeList&
+SipccSdpAttributeList::GetMsid() const
+{
+ if (!HasAttribute(SdpAttribute::kMsidAttribute)) {
+ MOZ_CRASH();
+ }
+ const SdpAttribute* attr = GetAttribute(SdpAttribute::kMsidAttribute);
+ return *static_cast<const SdpMsidAttributeList*>(attr);
+}
+
+const SdpMsidSemanticAttributeList&
+SipccSdpAttributeList::GetMsidSemantic() const
+{
+ if (!HasAttribute(SdpAttribute::kMsidSemanticAttribute)) {
+ MOZ_CRASH();
+ }
+ const SdpAttribute* attr = GetAttribute(SdpAttribute::kMsidSemanticAttribute);
+ return *static_cast<const SdpMsidSemanticAttributeList*>(attr);
+}
+
+const SdpRidAttributeList&
+SipccSdpAttributeList::GetRid() const
+{
+ if (!HasAttribute(SdpAttribute::kRidAttribute)) {
+ MOZ_CRASH();
+ }
+ const SdpAttribute* attr = GetAttribute(SdpAttribute::kRidAttribute);
+ return *static_cast<const SdpRidAttributeList*>(attr);
+}
+
+uint32_t
+SipccSdpAttributeList::GetPtime() const
+{
+ if (!HasAttribute(SdpAttribute::kPtimeAttribute)) {
+ MOZ_CRASH();
+ }
+ const SdpAttribute* attr = GetAttribute(SdpAttribute::kPtimeAttribute);
+ return static_cast<const SdpNumberAttribute*>(attr)->mValue;
+}
+
+const SdpRtcpAttribute&
+SipccSdpAttributeList::GetRtcp() const
+{
+ if (!HasAttribute(SdpAttribute::kRtcpAttribute)) {
+ MOZ_CRASH();
+ }
+ const SdpAttribute* attr = GetAttribute(SdpAttribute::kRtcpAttribute);
+ return *static_cast<const SdpRtcpAttribute*>(attr);
+}
+
+const SdpRtcpFbAttributeList&
+SipccSdpAttributeList::GetRtcpFb() const
+{
+ if (!HasAttribute(SdpAttribute::kRtcpFbAttribute)) {
+ MOZ_CRASH();
+ }
+ const SdpAttribute* attr = GetAttribute(SdpAttribute::kRtcpFbAttribute);
+ return *static_cast<const SdpRtcpFbAttributeList*>(attr);
+}
+
+const SdpRemoteCandidatesAttribute&
+SipccSdpAttributeList::GetRemoteCandidates() const
+{
+ MOZ_CRASH("Not yet implemented");
+}
+
+const SdpRtpmapAttributeList&
+SipccSdpAttributeList::GetRtpmap() const
+{
+ if (!HasAttribute(SdpAttribute::kRtpmapAttribute)) {
+ MOZ_CRASH();
+ }
+ const SdpAttribute* attr = GetAttribute(SdpAttribute::kRtpmapAttribute);
+ return *static_cast<const SdpRtpmapAttributeList*>(attr);
+}
+
+const SdpSctpmapAttributeList&
+SipccSdpAttributeList::GetSctpmap() const
+{
+ if (!HasAttribute(SdpAttribute::kSctpmapAttribute)) {
+ MOZ_CRASH();
+ }
+ const SdpAttribute* attr = GetAttribute(SdpAttribute::kSctpmapAttribute);
+ return *static_cast<const SdpSctpmapAttributeList*>(attr);
+}
+
+const SdpSetupAttribute&
+SipccSdpAttributeList::GetSetup() const
+{
+ if (!HasAttribute(SdpAttribute::kSetupAttribute)) {
+ MOZ_CRASH();
+ }
+ const SdpAttribute* attr = GetAttribute(SdpAttribute::kSetupAttribute);
+ return *static_cast<const SdpSetupAttribute*>(attr);
+}
+
+const SdpSsrcAttributeList&
+SipccSdpAttributeList::GetSsrc() const
+{
+ if (!HasAttribute(SdpAttribute::kSsrcAttribute)) {
+ MOZ_CRASH();
+ }
+ const SdpAttribute* attr = GetAttribute(SdpAttribute::kSsrcAttribute);
+ return *static_cast<const SdpSsrcAttributeList*>(attr);
+}
+
+const SdpSsrcGroupAttributeList&
+SipccSdpAttributeList::GetSsrcGroup() const
+{
+ MOZ_CRASH("Not yet implemented");
+}
+
+void
+SipccSdpAttributeList::Serialize(std::ostream& os) const
+{
+ for (size_t i = 0; i < kNumAttributeTypes; ++i) {
+ if (mAttributes[i]) {
+ os << *mAttributes[i];
+ }
+ }
+}
+
+} // namespace mozilla
diff --git a/media/webrtc/signaling/src/sdp/SipccSdpAttributeList.h b/media/webrtc/signaling/src/sdp/SipccSdpAttributeList.h
new file mode 100644
index 000000000..62dded52e
--- /dev/null
+++ b/media/webrtc/signaling/src/sdp/SipccSdpAttributeList.h
@@ -0,0 +1,147 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _SIPCCSDPATTRIBUTELIST_H_
+#define _SIPCCSDPATTRIBUTELIST_H_
+
+#include "signaling/src/sdp/SdpAttributeList.h"
+
+extern "C" {
+#include "signaling/src/sdp/sipcc/sdp.h"
+}
+
+namespace mozilla
+{
+
+class SipccSdp;
+class SipccSdpMediaSection;
+class SdpErrorHolder;
+
+class SipccSdpAttributeList : public SdpAttributeList
+{
+ friend class SipccSdpMediaSection;
+ friend class SipccSdp;
+
+public:
+ // Make sure we don't hide the default arg thunks
+ using SdpAttributeList::HasAttribute;
+ using SdpAttributeList::GetAttribute;
+
+ virtual bool HasAttribute(AttributeType type,
+ bool sessionFallback) const override;
+ virtual const SdpAttribute* GetAttribute(
+ AttributeType type, bool sessionFallback) const override;
+ virtual void SetAttribute(SdpAttribute* attr) override;
+ virtual void RemoveAttribute(AttributeType type) override;
+ virtual void Clear() override;
+
+ virtual const SdpConnectionAttribute& GetConnection() const override;
+ virtual const SdpFingerprintAttributeList& GetFingerprint() const
+ override;
+ virtual const SdpGroupAttributeList& GetGroup() const override;
+ virtual const SdpOptionsAttribute& GetIceOptions() const override;
+ virtual const SdpRtcpAttribute& GetRtcp() const override;
+ virtual const SdpRemoteCandidatesAttribute& GetRemoteCandidates() const
+ override;
+ virtual const SdpSetupAttribute& GetSetup() const override;
+ virtual const SdpSsrcAttributeList& GetSsrc() const override;
+ virtual const SdpSsrcGroupAttributeList& GetSsrcGroup() const override;
+ virtual const SdpDtlsMessageAttribute& GetDtlsMessage() const override;
+
+ // These attributes can appear multiple times, so the returned
+ // classes actually represent a collection of values.
+ virtual const std::vector<std::string>& GetCandidate() const override;
+ virtual const SdpExtmapAttributeList& GetExtmap() const override;
+ virtual const SdpFmtpAttributeList& GetFmtp() const override;
+ virtual const SdpImageattrAttributeList& GetImageattr() const override;
+ const SdpSimulcastAttribute& GetSimulcast() const override;
+ virtual const SdpMsidAttributeList& GetMsid() const override;
+ virtual const SdpMsidSemanticAttributeList& GetMsidSemantic()
+ const override;
+ const SdpRidAttributeList& GetRid() const override;
+ virtual const SdpRtcpFbAttributeList& GetRtcpFb() const override;
+ virtual const SdpRtpmapAttributeList& GetRtpmap() const override;
+ virtual const SdpSctpmapAttributeList& GetSctpmap() const override;
+
+ // These attributes are effectively simple types, so we'll make life
+ // easy by just returning their value.
+ virtual const std::string& GetIcePwd() const override;
+ virtual const std::string& GetIceUfrag() const override;
+ virtual const std::string& GetIdentity() const override;
+ virtual const std::string& GetLabel() const override;
+ virtual unsigned int GetMaxptime() const override;
+ virtual const std::string& GetMid() const override;
+ virtual unsigned int GetPtime() const override;
+
+ virtual SdpDirectionAttribute::Direction GetDirection() const override;
+
+ virtual void Serialize(std::ostream&) const override;
+
+ virtual ~SipccSdpAttributeList();
+
+private:
+ static const std::string kEmptyString;
+ static const size_t kNumAttributeTypes = SdpAttribute::kLastAttribute + 1;
+
+ // Pass a session-level attribute list if constructing a media-level one,
+ // otherwise pass nullptr
+ explicit SipccSdpAttributeList(const SipccSdpAttributeList* sessionLevel);
+
+ bool Load(sdp_t* sdp, uint16_t level, SdpErrorHolder& errorHolder);
+ void LoadSimpleStrings(sdp_t* sdp, uint16_t level,
+ SdpErrorHolder& errorHolder);
+ void LoadSimpleString(sdp_t* sdp, uint16_t level, sdp_attr_e attr,
+ AttributeType targetType, SdpErrorHolder& errorHolder);
+ void LoadSimpleNumbers(sdp_t* sdp, uint16_t level,
+ SdpErrorHolder& errorHolder);
+ void LoadSimpleNumber(sdp_t* sdp, uint16_t level, sdp_attr_e attr,
+ AttributeType targetType, SdpErrorHolder& errorHolder);
+ void LoadFlags(sdp_t* sdp, uint16_t level);
+ void LoadDirection(sdp_t* sdp, uint16_t level, SdpErrorHolder& errorHolder);
+ bool LoadRtpmap(sdp_t* sdp, uint16_t level, SdpErrorHolder& errorHolder);
+ bool LoadSctpmap(sdp_t* sdp, uint16_t level, SdpErrorHolder& errorHolder);
+ void LoadIceAttributes(sdp_t* sdp, uint16_t level);
+ bool LoadFingerprint(sdp_t* sdp, uint16_t level, SdpErrorHolder& errorHolder);
+ void LoadCandidate(sdp_t* sdp, uint16_t level);
+ void LoadSetup(sdp_t* sdp, uint16_t level);
+ void LoadSsrc(sdp_t* sdp, uint16_t level);
+ bool LoadImageattr(sdp_t* sdp, uint16_t level, SdpErrorHolder& errorHolder);
+ bool LoadSimulcast(sdp_t* sdp, uint16_t level, SdpErrorHolder& errorHolder);
+ bool LoadGroups(sdp_t* sdp, uint16_t level, SdpErrorHolder& errorHolder);
+ bool LoadMsidSemantics(sdp_t* sdp,
+ uint16_t level,
+ SdpErrorHolder& errorHolder);
+ void LoadIdentity(sdp_t* sdp, uint16_t level);
+ void LoadDtlsMessage(sdp_t* sdp, uint16_t level);
+ void LoadFmtp(sdp_t* sdp, uint16_t level);
+ void LoadMsids(sdp_t* sdp, uint16_t level, SdpErrorHolder& errorHolder);
+ bool LoadRid(sdp_t* sdp, uint16_t level, SdpErrorHolder& errorHolder);
+ void LoadExtmap(sdp_t* sdp, uint16_t level, SdpErrorHolder& errorHolder);
+ void LoadRtcpFb(sdp_t* sdp, uint16_t level, SdpErrorHolder& errorHolder);
+ void LoadRtcp(sdp_t* sdp, uint16_t level, SdpErrorHolder& errorHolder);
+ static SdpRtpmapAttributeList::CodecType GetCodecType(rtp_ptype type);
+
+ bool
+ AtSessionLevel() const
+ {
+ return !mSessionLevel;
+ }
+ bool IsAllowedHere(SdpAttribute::AttributeType type) const;
+ void WarnAboutMisplacedAttribute(SdpAttribute::AttributeType type,
+ uint32_t lineNumber,
+ SdpErrorHolder& errorHolder);
+
+ const SipccSdpAttributeList* mSessionLevel;
+
+ SdpAttribute* mAttributes[kNumAttributeTypes];
+
+ SipccSdpAttributeList(const SipccSdpAttributeList& orig) = delete;
+ SipccSdpAttributeList& operator=(const SipccSdpAttributeList& rhs) = delete;
+};
+
+} // namespace mozilla
+
+#endif
diff --git a/media/webrtc/signaling/src/sdp/SipccSdpMediaSection.cpp b/media/webrtc/signaling/src/sdp/SipccSdpMediaSection.cpp
new file mode 100644
index 000000000..33c2a9e0d
--- /dev/null
+++ b/media/webrtc/signaling/src/sdp/SipccSdpMediaSection.cpp
@@ -0,0 +1,423 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "signaling/src/sdp/SipccSdpMediaSection.h"
+
+#include <ostream>
+#include "signaling/src/sdp/SdpErrorHolder.h"
+
+#ifdef CRLF
+#undef CRLF
+#endif
+#define CRLF "\r\n"
+
+namespace mozilla
+{
+
+unsigned int
+SipccSdpMediaSection::GetPort() const
+{
+ return mPort;
+}
+
+void
+SipccSdpMediaSection::SetPort(unsigned int port)
+{
+ mPort = port;
+}
+
+unsigned int
+SipccSdpMediaSection::GetPortCount() const
+{
+ return mPortCount;
+}
+
+SdpMediaSection::Protocol
+SipccSdpMediaSection::GetProtocol() const
+{
+ return mProtocol;
+}
+
+const SdpConnection&
+SipccSdpMediaSection::GetConnection() const
+{
+ return *mConnection;
+}
+
+SdpConnection&
+SipccSdpMediaSection::GetConnection()
+{
+ return *mConnection;
+}
+
+uint32_t
+SipccSdpMediaSection::GetBandwidth(const std::string& type) const
+{
+ auto found = mBandwidths.find(type);
+ if (found == mBandwidths.end()) {
+ return 0;
+ }
+ return found->second;
+}
+
+const std::vector<std::string>&
+SipccSdpMediaSection::GetFormats() const
+{
+ return mFormats;
+}
+
+const SdpAttributeList&
+SipccSdpMediaSection::GetAttributeList() const
+{
+ return mAttributeList;
+}
+
+SdpAttributeList&
+SipccSdpMediaSection::GetAttributeList()
+{
+ return mAttributeList;
+}
+
+SdpDirectionAttribute
+SipccSdpMediaSection::GetDirectionAttribute() const
+{
+ return SdpDirectionAttribute(mAttributeList.GetDirection());
+}
+
+bool
+SipccSdpMediaSection::Load(sdp_t* sdp, uint16_t level,
+ SdpErrorHolder& errorHolder)
+{
+ switch (sdp_get_media_type(sdp, level)) {
+ case SDP_MEDIA_AUDIO:
+ mMediaType = kAudio;
+ break;
+ case SDP_MEDIA_VIDEO:
+ mMediaType = kVideo;
+ break;
+ case SDP_MEDIA_APPLICATION:
+ mMediaType = kApplication;
+ break;
+ case SDP_MEDIA_TEXT:
+ mMediaType = kText;
+ break;
+
+ default:
+ errorHolder.AddParseError(sdp_get_media_line_number(sdp, level),
+ "Unsupported media section type");
+ return false;
+ }
+
+ mPort = sdp_get_media_portnum(sdp, level);
+ int32_t pc = sdp_get_media_portcount(sdp, level);
+ if (pc == SDP_INVALID_VALUE) {
+ // SDP_INVALID_VALUE (ie; -2) is used when there is no port count. :(
+ mPortCount = 0;
+ } else if (pc > static_cast<int32_t>(UINT16_MAX) || pc < 0) {
+ errorHolder.AddParseError(sdp_get_media_line_number(sdp, level),
+ "Invalid port count");
+ return false;
+ } else {
+ mPortCount = pc;
+ }
+
+ if (!LoadProtocol(sdp, level, errorHolder)) {
+ return false;
+ }
+
+ if (!LoadFormats(sdp, level, errorHolder)) {
+ return false;
+ }
+
+ if (!mAttributeList.Load(sdp, level, errorHolder)) {
+ return false;
+ }
+
+ if (!ValidateSimulcast(sdp, level, errorHolder)) {
+ return false;
+ }
+
+ if (!mBandwidths.Load(sdp, level, errorHolder)) {
+ return false;
+ }
+
+ return LoadConnection(sdp, level, errorHolder);
+}
+
+bool
+SipccSdpMediaSection::LoadProtocol(sdp_t* sdp, uint16_t level,
+ SdpErrorHolder& errorHolder)
+{
+ switch (sdp_get_media_transport(sdp, level)) {
+ case SDP_TRANSPORT_RTPAVP:
+ mProtocol = kRtpAvp;
+ break;
+ case SDP_TRANSPORT_RTPSAVP:
+ mProtocol = kRtpSavp;
+ break;
+ case SDP_TRANSPORT_RTPAVPF:
+ mProtocol = kRtpAvpf;
+ break;
+ case SDP_TRANSPORT_RTPSAVPF:
+ mProtocol = kRtpSavpf;
+ break;
+ case SDP_TRANSPORT_UDPTLSRTPSAVP:
+ mProtocol = kUdpTlsRtpSavp;
+ break;
+ case SDP_TRANSPORT_UDPTLSRTPSAVPF:
+ mProtocol = kUdpTlsRtpSavpf;
+ break;
+ case SDP_TRANSPORT_TCPTLSRTPSAVP:
+ mProtocol = kTcpTlsRtpSavp;
+ break;
+ case SDP_TRANSPORT_TCPTLSRTPSAVPF:
+ mProtocol = kTcpTlsRtpSavpf;
+ break;
+ case SDP_TRANSPORT_DTLSSCTP:
+ mProtocol = kDtlsSctp;
+ break;
+
+ default:
+ errorHolder.AddParseError(sdp_get_media_line_number(sdp, level),
+ "Unsupported media transport type");
+ return false;
+ }
+ return true;
+}
+
+bool
+SipccSdpMediaSection::LoadFormats(sdp_t* sdp,
+ uint16_t level,
+ SdpErrorHolder& errorHolder)
+{
+ sdp_media_e mtype = sdp_get_media_type(sdp, level);
+
+ if (mtype == SDP_MEDIA_APPLICATION) {
+ uint32_t ptype = sdp_get_media_sctp_port(sdp, level);
+ std::ostringstream osPayloadType;
+ osPayloadType << ptype;
+ mFormats.push_back(osPayloadType.str());
+ } else if (mtype == SDP_MEDIA_AUDIO || mtype == SDP_MEDIA_VIDEO) {
+ uint16_t count = sdp_get_media_num_payload_types(sdp, level);
+ for (uint16_t i = 0; i < count; ++i) {
+ sdp_payload_ind_e indicator; // we ignore this, which is fine
+ uint32_t ptype =
+ sdp_get_media_payload_type(sdp, level, i + 1, &indicator);
+
+ if (GET_DYN_PAYLOAD_TYPE_VALUE(ptype) > UINT8_MAX) {
+ errorHolder.AddParseError(sdp_get_media_line_number(sdp, level),
+ "Format is too large");
+ return false;
+ }
+
+ std::ostringstream osPayloadType;
+ // sipcc stores payload types in a funny way. When sipcc and the SDP it
+ // parsed differ on what payload type number should be used for a given
+ // codec, sipcc's value goes in the lower byte, and the SDP's value in
+ // the upper byte. When they do not differ, only the lower byte is used.
+ // We want what was in the SDP, verbatim.
+ osPayloadType << GET_DYN_PAYLOAD_TYPE_VALUE(ptype);
+ mFormats.push_back(osPayloadType.str());
+ }
+ }
+
+ return true;
+}
+
+bool
+SipccSdpMediaSection::ValidateSimulcast(sdp_t* sdp, uint16_t level,
+ SdpErrorHolder& errorHolder) const
+{
+ if (!GetAttributeList().HasAttribute(SdpAttribute::kSimulcastAttribute)) {
+ return true;
+ }
+
+ const SdpSimulcastAttribute& simulcast(GetAttributeList().GetSimulcast());
+ if (!ValidateSimulcastVersions(
+ sdp, level, simulcast.sendVersions, sdp::kSend, errorHolder)) {
+ return false;
+ }
+ if (!ValidateSimulcastVersions(
+ sdp, level, simulcast.recvVersions, sdp::kRecv, errorHolder)) {
+ return false;
+ }
+ return true;
+}
+
+bool
+SipccSdpMediaSection::ValidateSimulcastVersions(
+ sdp_t* sdp,
+ uint16_t level,
+ const SdpSimulcastAttribute::Versions& versions,
+ sdp::Direction direction,
+ SdpErrorHolder& errorHolder) const
+{
+ if (versions.IsSet() && !(direction & GetDirectionAttribute().mValue)) {
+ errorHolder.AddParseError(sdp_get_media_line_number(sdp, level),
+ "simulcast attribute has a direction that is "
+ "inconsistent with the direction of this media "
+ "section.");
+ return false;
+ }
+
+ for (const SdpSimulcastAttribute::Version& version : versions) {
+ for (const std::string& id : version.choices) {
+ if (versions.type == SdpSimulcastAttribute::Versions::kRid) {
+ const SdpRidAttributeList::Rid* ridAttr = FindRid(id);
+ if (!ridAttr || (ridAttr->direction != direction)) {
+ std::ostringstream os;
+ os << "No rid attribute for \'" << id << "\'";
+ errorHolder.AddParseError(sdp_get_media_line_number(sdp, level),
+ os.str());
+ return false;
+ }
+ } else if (versions.type == SdpSimulcastAttribute::Versions::kPt) {
+ if (std::find(mFormats.begin(), mFormats.end(), id)
+ == mFormats.end()) {
+ std::ostringstream os;
+ os << "No pt for \'" << id << "\'";
+ errorHolder.AddParseError(sdp_get_media_line_number(sdp, level),
+ os.str());
+ return false;
+ }
+ }
+ }
+ }
+ return true;
+}
+
+bool
+SipccSdpMediaSection::LoadConnection(sdp_t* sdp, uint16_t level,
+ SdpErrorHolder& errorHolder)
+{
+ if (!sdp_connection_valid(sdp, level)) {
+ level = SDP_SESSION_LEVEL;
+ if (!sdp_connection_valid(sdp, level)) {
+ errorHolder.AddParseError(sdp_get_media_line_number(sdp, level),
+ "Missing c= line");
+ return false;
+ }
+ }
+
+ sdp_nettype_e type = sdp_get_conn_nettype(sdp, level);
+ if (type != SDP_NT_INTERNET) {
+ errorHolder.AddParseError(sdp_get_media_line_number(sdp, level),
+ "Unsupported network type");
+ return false;
+ }
+
+ sdp::AddrType addrType;
+ switch (sdp_get_conn_addrtype(sdp, level)) {
+ case SDP_AT_IP4:
+ addrType = sdp::kIPv4;
+ break;
+ case SDP_AT_IP6:
+ addrType = sdp::kIPv6;
+ break;
+ default:
+ errorHolder.AddParseError(sdp_get_media_line_number(sdp, level),
+ "Unsupported address type");
+ return false;
+ }
+
+ std::string address = sdp_get_conn_address(sdp, level);
+ int16_t ttl = static_cast<uint16_t>(sdp_get_mcast_ttl(sdp, level));
+ if (ttl < 0) {
+ ttl = 0;
+ }
+ int32_t numAddr =
+ static_cast<uint32_t>(sdp_get_mcast_num_of_addresses(sdp, level));
+ if (numAddr < 0) {
+ numAddr = 0;
+ }
+ mConnection = MakeUnique<SdpConnection>(addrType, address, ttl, numAddr);
+ return true;
+}
+
+void
+SipccSdpMediaSection::AddCodec(const std::string& pt, const std::string& name,
+ uint32_t clockrate, uint16_t channels)
+{
+ mFormats.push_back(pt);
+
+ SdpRtpmapAttributeList* rtpmap = new SdpRtpmapAttributeList();
+ if (mAttributeList.HasAttribute(SdpAttribute::kRtpmapAttribute)) {
+ const SdpRtpmapAttributeList& old = mAttributeList.GetRtpmap();
+ for (auto it = old.mRtpmaps.begin(); it != old.mRtpmaps.end(); ++it) {
+ rtpmap->mRtpmaps.push_back(*it);
+ }
+ }
+ SdpRtpmapAttributeList::CodecType codec = SdpRtpmapAttributeList::kOtherCodec;
+ if (name == "opus") {
+ codec = SdpRtpmapAttributeList::kOpus;
+ } else if (name == "G722") {
+ codec = SdpRtpmapAttributeList::kG722;
+ } else if (name == "PCMU") {
+ codec = SdpRtpmapAttributeList::kPCMU;
+ } else if (name == "PCMA") {
+ codec = SdpRtpmapAttributeList::kPCMA;
+ } else if (name == "VP8") {
+ codec = SdpRtpmapAttributeList::kVP8;
+ } else if (name == "VP9") {
+ codec = SdpRtpmapAttributeList::kVP9;
+ } else if (name == "H264") {
+ codec = SdpRtpmapAttributeList::kH264;
+ }
+
+ rtpmap->PushEntry(pt, codec, name, clockrate, channels);
+ mAttributeList.SetAttribute(rtpmap);
+}
+
+void
+SipccSdpMediaSection::ClearCodecs()
+{
+ mFormats.clear();
+ mAttributeList.RemoveAttribute(SdpAttribute::kRtpmapAttribute);
+ mAttributeList.RemoveAttribute(SdpAttribute::kFmtpAttribute);
+ mAttributeList.RemoveAttribute(SdpAttribute::kSctpmapAttribute);
+ mAttributeList.RemoveAttribute(SdpAttribute::kRtcpFbAttribute);
+}
+
+void
+SipccSdpMediaSection::AddDataChannel(const std::string& pt,
+ const std::string& name, uint16_t streams)
+{
+ // Only one allowed, for now. This may change as the specs (and deployments)
+ // evolve.
+ mFormats.clear();
+ mFormats.push_back(pt);
+ SdpSctpmapAttributeList* sctpmap = new SdpSctpmapAttributeList();
+ sctpmap->PushEntry(pt, name, streams);
+ mAttributeList.SetAttribute(sctpmap);
+}
+
+void
+SipccSdpMediaSection::Serialize(std::ostream& os) const
+{
+ os << "m=" << mMediaType << " " << mPort;
+ if (mPortCount) {
+ os << "/" << mPortCount;
+ }
+ os << " " << mProtocol;
+ for (auto i = mFormats.begin(); i != mFormats.end(); ++i) {
+ os << " " << (*i);
+ }
+ os << CRLF;
+
+ // We dont do i=
+
+ if (mConnection) {
+ os << *mConnection;
+ }
+
+ mBandwidths.Serialize(os);
+
+ // We dont do k= because they're evil
+
+ os << mAttributeList;
+}
+
+} // namespace mozilla
diff --git a/media/webrtc/signaling/src/sdp/SipccSdpMediaSection.h b/media/webrtc/signaling/src/sdp/SipccSdpMediaSection.h
new file mode 100644
index 000000000..6d2dafa7b
--- /dev/null
+++ b/media/webrtc/signaling/src/sdp/SipccSdpMediaSection.h
@@ -0,0 +1,102 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _SIPCCSDPMEDIASECTION_H_
+#define _SIPCCSDPMEDIASECTION_H_
+
+#include "mozilla/Attributes.h"
+#include "mozilla/UniquePtr.h"
+#include "signaling/src/sdp/SdpMediaSection.h"
+#include "signaling/src/sdp/SipccSdpAttributeList.h"
+
+#include <map>
+
+extern "C" {
+#include "signaling/src/sdp/sipcc/sdp.h"
+}
+
+namespace mozilla
+{
+
+class SipccSdp;
+class SdpErrorHolder;
+
+class SipccSdpBandwidths final : public std::map<std::string, uint32_t>
+{
+public:
+ bool Load(sdp_t* sdp, uint16_t level, SdpErrorHolder& errorHolder);
+ void Serialize(std::ostream& os) const;
+};
+
+class SipccSdpMediaSection final : public SdpMediaSection
+{
+ friend class SipccSdp;
+
+public:
+ ~SipccSdpMediaSection() {}
+
+ virtual MediaType
+ GetMediaType() const override
+ {
+ return mMediaType;
+ }
+
+ virtual unsigned int GetPort() const override;
+ virtual void SetPort(unsigned int port) override;
+ virtual unsigned int GetPortCount() const override;
+ virtual Protocol GetProtocol() const override;
+ virtual const SdpConnection& GetConnection() const override;
+ virtual SdpConnection& GetConnection() override;
+ virtual uint32_t GetBandwidth(const std::string& type) const override;
+ virtual const std::vector<std::string>& GetFormats() const override;
+
+ virtual const SdpAttributeList& GetAttributeList() const override;
+ virtual SdpAttributeList& GetAttributeList() override;
+ virtual SdpDirectionAttribute GetDirectionAttribute() const override;
+
+ virtual void AddCodec(const std::string& pt, const std::string& name,
+ uint32_t clockrate, uint16_t channels) override;
+ virtual void ClearCodecs() override;
+
+ virtual void AddDataChannel(const std::string& pt, const std::string& name,
+ uint16_t streams) override;
+
+ virtual void Serialize(std::ostream&) const override;
+
+private:
+ SipccSdpMediaSection(size_t level, const SipccSdpAttributeList* sessionLevel)
+ : SdpMediaSection(level), mAttributeList(sessionLevel)
+ {
+ }
+
+ bool Load(sdp_t* sdp, uint16_t level, SdpErrorHolder& errorHolder);
+ bool LoadConnection(sdp_t* sdp, uint16_t level, SdpErrorHolder& errorHolder);
+ bool LoadProtocol(sdp_t* sdp, uint16_t level, SdpErrorHolder& errorHolder);
+ bool LoadFormats(sdp_t* sdp, uint16_t level, SdpErrorHolder& errorHolder);
+ bool ValidateSimulcast(sdp_t* sdp, uint16_t level,
+ SdpErrorHolder& errorHolder) const;
+ bool ValidateSimulcastVersions(
+ sdp_t* sdp,
+ uint16_t level,
+ const SdpSimulcastAttribute::Versions& versions,
+ sdp::Direction direction,
+ SdpErrorHolder& errorHolder) const;
+
+ // the following values are cached on first get
+ MediaType mMediaType;
+ uint16_t mPort;
+ uint16_t mPortCount;
+ Protocol mProtocol;
+ std::vector<std::string> mFormats;
+
+ UniquePtr<SdpConnection> mConnection;
+ SipccSdpBandwidths mBandwidths;
+
+ SipccSdpAttributeList mAttributeList;
+};
+}
+
+#endif
diff --git a/media/webrtc/signaling/src/sdp/SipccSdpParser.cpp b/media/webrtc/signaling/src/sdp/SipccSdpParser.cpp
new file mode 100644
index 000000000..04fea305c
--- /dev/null
+++ b/media/webrtc/signaling/src/sdp/SipccSdpParser.cpp
@@ -0,0 +1,83 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "signaling/src/sdp/SipccSdpParser.h"
+#include "signaling/src/sdp/SipccSdp.h"
+
+#include <utility>
+extern "C" {
+#include "signaling/src/sdp/sipcc/sdp.h"
+}
+
+namespace mozilla
+{
+
+extern "C" {
+
+void
+sipcc_sdp_parser_error_handler(void *context, uint32_t line,
+ const char *message)
+{
+ SdpErrorHolder *errorHolder = static_cast<SdpErrorHolder *>(context);
+ std::string err(message);
+ errorHolder->AddParseError(line, err);
+}
+
+} // extern "C"
+
+UniquePtr<Sdp>
+SipccSdpParser::Parse(const std::string &sdpText)
+{
+ ClearParseErrors();
+
+ sdp_conf_options_t *sipcc_config = sdp_init_config();
+ if (!sipcc_config) {
+ return UniquePtr<Sdp>();
+ }
+
+ sdp_nettype_supported(sipcc_config, SDP_NT_INTERNET, true);
+ sdp_addrtype_supported(sipcc_config, SDP_AT_IP4, true);
+ sdp_addrtype_supported(sipcc_config, SDP_AT_IP6, true);
+ sdp_transport_supported(sipcc_config, SDP_TRANSPORT_RTPAVP, true);
+ sdp_transport_supported(sipcc_config, SDP_TRANSPORT_RTPAVPF, true);
+ sdp_transport_supported(sipcc_config, SDP_TRANSPORT_RTPSAVP, true);
+ sdp_transport_supported(sipcc_config, SDP_TRANSPORT_RTPSAVPF, true);
+ sdp_transport_supported(sipcc_config, SDP_TRANSPORT_UDPTLSRTPSAVP, true);
+ sdp_transport_supported(sipcc_config, SDP_TRANSPORT_UDPTLSRTPSAVPF, true);
+ sdp_transport_supported(sipcc_config, SDP_TRANSPORT_TCPTLSRTPSAVP, true);
+ sdp_transport_supported(sipcc_config, SDP_TRANSPORT_TCPTLSRTPSAVPF, true);
+ sdp_transport_supported(sipcc_config, SDP_TRANSPORT_DTLSSCTP, true);
+ sdp_require_session_name(sipcc_config, false);
+
+ sdp_config_set_error_handler(sipcc_config, &sipcc_sdp_parser_error_handler,
+ this);
+
+ // Takes ownership of |sipcc_config| iff it succeeds
+ sdp_t *sdp = sdp_init_description(sipcc_config);
+ if (!sdp) {
+ sdp_free_config(sipcc_config);
+ return UniquePtr<Sdp>();
+ }
+
+ const char *rawString = sdpText.c_str();
+ sdp_result_e sdpres = sdp_parse(sdp, rawString, sdpText.length());
+ if (sdpres != SDP_SUCCESS) {
+ sdp_free_description(sdp);
+ return UniquePtr<Sdp>();
+ }
+
+ UniquePtr<SipccSdp> sipccSdp(new SipccSdp);
+
+ bool success = sipccSdp->Load(sdp, *this);
+ sdp_free_description(sdp);
+ if (!success) {
+ return UniquePtr<Sdp>();
+ }
+
+ return UniquePtr<Sdp>(Move(sipccSdp));
+}
+
+} // namespace mozilla
diff --git a/media/webrtc/signaling/src/sdp/SipccSdpParser.h b/media/webrtc/signaling/src/sdp/SipccSdpParser.h
new file mode 100644
index 000000000..f28780ddf
--- /dev/null
+++ b/media/webrtc/signaling/src/sdp/SipccSdpParser.h
@@ -0,0 +1,35 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _SIPCCSDPPARSER_H_
+#define _SIPCCSDPPARSER_H_
+
+#include <string>
+
+#include "mozilla/UniquePtr.h"
+
+#include "signaling/src/sdp/Sdp.h"
+#include "signaling/src/sdp/SdpErrorHolder.h"
+
+namespace mozilla
+{
+
+class SipccSdpParser final : public SdpErrorHolder
+{
+public:
+ SipccSdpParser() {}
+ virtual ~SipccSdpParser() {}
+
+ /**
+ * This parses the provided text into an SDP object.
+ * This returns a nullptr-valued pointer if things go poorly.
+ */
+ UniquePtr<Sdp> Parse(const std::string& sdpText);
+};
+
+} // namespace mozilla
+
+#endif
diff --git a/media/webrtc/signaling/src/sdp/sipcc/ccsdp.h b/media/webrtc/signaling/src/sdp/sipcc/ccsdp.h
new file mode 100644
index 000000000..8a04d8f0b
--- /dev/null
+++ b/media/webrtc/signaling/src/sdp/sipcc/ccsdp.h
@@ -0,0 +1,207 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef __CCSDP_H__
+#define __CCSDP_H__
+
+#include "cpr_types.h"
+#include "ccsdp_rtcp_fb.h"
+
+#define SIPSDP_ILBC_MODE20 20
+
+/**
+ * Well-known RTP payload types used by the sdp helper APIs
+ */
+typedef enum rtp_ptype_
+{
+ RTP_NONE = -1,
+ RTP_PCMU = 0,
+ RTP_CELP = 1,
+ RTP_G726 = 2,
+ RTP_GSM = 3,
+ RTP_G723 = 4,
+ RTP_DVI4 = 5,
+ RTP_DVI4_II = 6,
+ RTP_LPC = 7,
+ RTP_PCMA = 8,
+ RTP_G722 = 9,
+ RTP_G728 = 15,
+ RTP_G729 = 18,
+ RTP_JPEG = 26,
+ RTP_NV = 28,
+ RTP_H261 = 31,
+ RTP_H264_P0 = 97,
+ RTP_H264_P1 = 126,
+ RTP_TELEPHONE_EVENT = 101,
+ RTP_L16 = 102,
+ RTP_H263 = 103,
+ RTP_ILBC = 116, /* used only to make an offer */
+ RTP_OPUS = 109,
+ RTP_VP8 = 120,
+ RTP_VP9 = 121,
+ RTP_RED = 122,
+ RTP_ULPFEC = 123,
+ RTP_I420 = 124,
+ RTP_ISAC = 124
+} rtp_ptype;
+
+typedef struct {
+ const char *name;
+ int value;
+} ccsdp_key_table_entry_t;
+
+typedef enum max_coded_audio_bandwidth_ {
+ opus_nb = 0, /* Narrowband */
+ opus_mb = 1, /* Mediumband */
+ opus_wb = 2, /* Wideband */
+ opus_swb = 3, /* Super-wideband */
+ opus_fb = 4 /* Fullband */
+} max_coded_audio_bandwidth;
+
+static const ccsdp_key_table_entry_t max_coded_audio_bandwidth_table[] = {
+ {"nb", opus_nb},
+ {"mb", opus_mb},
+ {"wb", opus_wb},
+ {"swb", opus_swb},
+ {"fb", opus_fb}
+};
+
+typedef enum {
+ SDP_SUCCESS, /**< Success */
+ SDP_FAILURE,
+ SDP_INVALID_SDP_PTR,
+ SDP_NOT_SDP_DESCRIPTION,
+ SDP_INVALID_TOKEN_ORDERING,
+ SDP_INVALID_PARAMETER,
+ SDP_INVALID_MEDIA_LEVEL,
+ SDP_INVALID_CAPABILITY,
+ SDP_NO_RESOURCE,
+ SDP_UNRECOGNIZED_TOKEN,
+ SDP_NULL_BUF_PTR,
+ SDP_POTENTIAL_SDP_OVERFLOW,
+ SDP_EMPTY_TOKEN,
+ SDP_MAX_RC
+} sdp_result_e;
+
+/**
+ * Indicates invalid bandwidth value
+ */
+#define SDP_INVALID_VALUE (-2)
+
+/**
+ * Bandwidth modifier type for b= SDP line
+ */
+typedef enum {
+ SDP_BW_MODIFIER_INVALID = -1,
+ SDP_BW_MODIFIER_AS, /** < b=AS: */
+ SDP_BW_MODIFIER_CT, /** < b=CT: */
+ SDP_BW_MODIFIER_TIAS, /** < b=TIAS: */
+ SDP_MAX_BW_MODIFIER_VAL,
+ SDP_BW_MODIFIER_UNSUPPORTED
+} sdp_bw_modifier_e;
+
+/**
+ * SDP attribute types
+ */
+/* Attribute Types */
+typedef enum {
+ SDP_ATTR_BEARER = 0,
+ SDP_ATTR_CALLED,
+ SDP_ATTR_CONN_TYPE,
+ SDP_ATTR_DIALED,
+ SDP_ATTR_DIALING,
+ SDP_ATTR_DIRECTION,
+ SDP_ATTR_EECID,
+ SDP_ATTR_FMTP,
+ SDP_ATTR_SCTPMAP,
+ SDP_ATTR_FRAMING,
+ SDP_ATTR_INACTIVE,
+ SDP_ATTR_PTIME,
+ SDP_ATTR_QOS,
+ SDP_ATTR_CURR,
+ SDP_ATTR_DES,
+ SDP_ATTR_CONF,
+ SDP_ATTR_RECVONLY,
+ SDP_ATTR_RTPMAP,
+ SDP_ATTR_SECURE,
+ SDP_ATTR_SENDONLY,
+ SDP_ATTR_SENDRECV,
+ SDP_ATTR_SUBNET,
+ SDP_ATTR_T38_VERSION,
+ SDP_ATTR_T38_MAXBITRATE,
+ SDP_ATTR_T38_FILLBITREMOVAL,
+ SDP_ATTR_T38_TRANSCODINGMMR,
+ SDP_ATTR_T38_TRANSCODINGJBIG,
+ SDP_ATTR_T38_RATEMGMT,
+ SDP_ATTR_T38_MAXBUFFER,
+ SDP_ATTR_T38_MAXDGRAM,
+ SDP_ATTR_T38_UDPEC,
+ SDP_ATTR_X_CAP,
+ SDP_ATTR_X_CPAR,
+ SDP_ATTR_X_PC_CODEC,
+ SDP_ATTR_X_PC_QOS,
+ SDP_ATTR_X_QOS,
+ SDP_ATTR_X_SQN,
+ SDP_ATTR_TMRGWXID,
+ SDP_ATTR_TC1_PAYLOAD_BYTES,
+ SDP_ATTR_TC1_WINDOW_SIZE,
+ SDP_ATTR_TC2_PAYLOAD_BYTES,
+ SDP_ATTR_TC2_WINDOW_SIZE,
+ SDP_ATTR_RTCP,
+ SDP_ATTR_RTR,
+ SDP_ATTR_SILENCESUPP,
+ SDP_ATTR_SRTP_CONTEXT, /* version 2 sdescriptions */
+ SDP_ATTR_MPTIME,
+ SDP_ATTR_X_SIDIN,
+ SDP_ATTR_X_SIDOUT,
+ SDP_ATTR_X_CONFID,
+ SDP_ATTR_GROUP,
+ SDP_ATTR_MID,
+ SDP_ATTR_SOURCE_FILTER,
+ SDP_ATTR_RTCP_UNICAST,
+ SDP_ATTR_MAXPRATE,
+ SDP_ATTR_SQN,
+ SDP_ATTR_CDSC,
+ SDP_ATTR_CPAR,
+ SDP_ATTR_SPRTMAP,
+ SDP_ATTR_SDESCRIPTIONS, /* version 9 sdescriptions */
+ SDP_ATTR_LABEL,
+ SDP_ATTR_FRAMERATE,
+ SDP_ATTR_ICE_CANDIDATE,
+ SDP_ATTR_ICE_UFRAG,
+ SDP_ATTR_ICE_PWD,
+ SDP_ATTR_ICE_LITE,
+ SDP_ATTR_RTCP_MUX,
+ SDP_ATTR_DTLS_FINGERPRINT,
+ SDP_ATTR_MAXPTIME,
+ SDP_ATTR_RTCP_FB, /* RFC 4585 */
+ SDP_ATTR_SETUP,
+ SDP_ATTR_CONNECTION,
+ SDP_ATTR_EXTMAP, /* RFC 5285 */
+ SDP_ATTR_IDENTITY,
+ SDP_ATTR_MSID,
+ SDP_ATTR_MSID_SEMANTIC,
+ SDP_ATTR_BUNDLE_ONLY,
+ SDP_ATTR_END_OF_CANDIDATES,
+ SDP_ATTR_ICE_OPTIONS,
+ SDP_ATTR_SSRC,
+ SDP_ATTR_IMAGEATTR,
+ SDP_ATTR_SIMULCAST,
+ SDP_ATTR_RID,
+ SDP_ATTR_DTLS_MESSAGE,
+ SDP_MAX_ATTR_TYPES,
+ SDP_ATTR_INVALID
+} sdp_attr_e;
+
+typedef enum {
+ SDP_SETUP_NOT_FOUND = -1,
+ SDP_SETUP_ACTIVE = 0,
+ SDP_SETUP_PASSIVE,
+ SDP_SETUP_ACTPASS,
+ SDP_SETUP_HOLDCONN,
+ SDP_MAX_SETUP,
+ SDP_SETUP_UNKNOWN
+} sdp_setup_type_e;
+
+#endif
diff --git a/media/webrtc/signaling/src/sdp/sipcc/ccsdp_rtcp_fb.h b/media/webrtc/signaling/src/sdp/sipcc/ccsdp_rtcp_fb.h
new file mode 100644
index 000000000..5518521ba
--- /dev/null
+++ b/media/webrtc/signaling/src/sdp/sipcc/ccsdp_rtcp_fb.h
@@ -0,0 +1,63 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef __CCSDP_RTCP_FB_H__
+#define __CCSDP_RTCP_FB_H__
+
+/* a=rtcp-fb enumerations */
+
+typedef enum {
+ SDP_RTCP_FB_ANY = -1,
+ SDP_RTCP_FB_ACK = 0,
+ SDP_RTCP_FB_CCM,
+ SDP_RTCP_FB_NACK,
+ SDP_RTCP_FB_TRR_INT,
+ // from https://www.ietf.org/archive/id/draft-alvestrand-rmcat-remb-03.txt
+ SDP_RTCP_FB_REMB,
+ SDP_MAX_RTCP_FB,
+ SDP_RTCP_FB_UNKNOWN
+} sdp_rtcp_fb_type_e;
+
+typedef enum {
+ SDP_RTCP_FB_NACK_NOT_FOUND = -1,
+ SDP_RTCP_FB_NACK_BASIC = 0,
+ SDP_RTCP_FB_NACK_SLI,
+ SDP_RTCP_FB_NACK_PLI,
+ SDP_RTCP_FB_NACK_RPSI,
+ SDP_RTCP_FB_NACK_APP,
+ SDP_RTCP_FB_NACK_RAI,
+ SDP_RTCP_FB_NACK_TLLEI,
+ SDP_RTCP_FB_NACK_PSLEI,
+ SDP_RTCP_FB_NACK_ECN,
+ SDP_MAX_RTCP_FB_NACK,
+ SDP_RTCP_FB_NACK_UNKNOWN
+} sdp_rtcp_fb_nack_type_e;
+
+typedef enum {
+ SDP_RTCP_FB_ACK_NOT_FOUND = -1,
+ SDP_RTCP_FB_ACK_RPSI = 0,
+ SDP_RTCP_FB_ACK_APP,
+ SDP_MAX_RTCP_FB_ACK,
+ SDP_RTCP_FB_ACK_UNKNOWN
+} sdp_rtcp_fb_ack_type_e;
+
+// Codec Control Messages - defined by RFC 5104
+typedef enum {
+ SDP_RTCP_FB_CCM_NOT_FOUND = -1,
+ SDP_RTCP_FB_CCM_FIR = 0,
+ SDP_RTCP_FB_CCM_TMMBR,
+ SDP_RTCP_FB_CCM_TSTR,
+ SDP_RTCP_FB_CCM_VBCM,
+ SDP_MAX_RTCP_FB_CCM,
+ SDP_RTCP_FB_CCM_UNKNOWN
+} sdp_rtcp_fb_ccm_type_e;
+
+#ifdef __cplusplus
+static_assert(SDP_MAX_RTCP_FB_NACK +
+ SDP_MAX_RTCP_FB_ACK +
+ SDP_MAX_RTCP_FB_CCM < 32,
+ "rtcp-fb Bitmap is larger than 32 bits");
+#endif
+
+#endif
diff --git a/media/webrtc/signaling/src/sdp/sipcc/cpr_darwin_types.h b/media/webrtc/signaling/src/sdp/sipcc/cpr_darwin_types.h
new file mode 100644
index 000000000..97a2644b5
--- /dev/null
+++ b/media/webrtc/signaling/src/sdp/sipcc/cpr_darwin_types.h
@@ -0,0 +1,68 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _CPR_DARWIN_TYPES_H_
+#define _CPR_DARWIN_TYPES_H_
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <stddef.h>
+#include "inttypes.h"
+
+
+/**
+ * @typedef boolean
+ *
+ * Define boolean as an unsigned byte
+ *
+ * @note There are differences within TNP header files
+ * @li curses.h: bool => char
+ * @li types.h: boolean_t => enum
+ * @li dki_lock.h: bool_t => int
+ */
+typedef uint8_t boolean;
+
+/*
+ * Define min/max
+ * defined in param.h
+ *
+ * The GNU versions of the MAX and MIN macros do two things better than
+ * the old versions:
+ *   1. they are more optimal as they only evaluate a & b once by creating
+ * a variable of each type on the local stack.
+ * 2. they fix potential errors due to side-effects where a and b were
+ * evaluated twice, i.e. MIN(i++,j++)
+ *
+ * @note b could be cast to a's type, to help with usage where the code
+ * compares signed and unsigned types.
+ */
+#ifndef MIN
+#ifdef __GNUC__
+#define MIN(a,b) ({ typeof(a) _a = (a); typeof(b) _b = (b); _a < _b ? _a : _b; })
+#else
+#define MIN(a,b) (((a) < (b)) ? (a) : (b))
+#endif
+#endif
+
+#ifndef MAX
+#ifdef __GNUC__
+#define MAX(a,b) ({ typeof(a) _a = (a); typeof(b) _b = (b); _a > _b ? _a : _b; })
+#else
+#define MAX(a,b) (((a) > (b)) ? (a) : (b))
+#endif
+#endif
+
+/**
+ * Define TRUE/FALSE
+ * defined in several header files
+ */
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+#endif
diff --git a/media/webrtc/signaling/src/sdp/sipcc/cpr_linux_types.h b/media/webrtc/signaling/src/sdp/sipcc/cpr_linux_types.h
new file mode 100644
index 000000000..78f05f413
--- /dev/null
+++ b/media/webrtc/signaling/src/sdp/sipcc/cpr_linux_types.h
@@ -0,0 +1,82 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _CPR_LINUX_TYPES_H_
+#define _CPR_LINUX_TYPES_H_
+
+#include "sys/types.h"
+#include "stddef.h"
+#include "inttypes.h"
+
+/**
+ * @typedef boolean
+ *
+ * Define boolean as an unsigned byte
+ *
+ * @note There are differences within TNP header files
+ * @li curses.h: bool => char
+ * @li types.h: boolean_t => enum
+ * @li dki_lock.h: bool_t => int
+ */
+typedef uint8_t boolean;
+
+/*
+ * Define size_t
+ * defined in numerous header files
+ */
+/* DONE (sys/types.h => unsigned int) */
+
+/*
+ * Define ssize_t
+ */
+/* DONE (sys/types.h => int) */
+
+/*
+ * Define MIN/MAX
+ * defined in param.h
+ *
+ * The GNU versions of the MAX and MIN macros do two things better than
+ * the old versions:
+ *   1. they are more optimal as they only evaluate a & b once by creating
+ * a variable of each type on the local stack.
+ * 2. they fix potential errors due to side-effects where a and b were
+ * evaluated twice, i.e. MIN(i++,j++)
+ *
+ * @note b could be cast to a's type, to help with usage where the code
+ * compares signed and unsigned types.
+ */
+#ifndef MIN
+#ifdef __GNUC__
+#define MIN(a,b) ({ typeof(a) _a = (a); typeof(b) _b = (b); _a < _b ? _a : _b; })
+#else
+#define MIN(a,b) (((a) < (b)) ? (a) : (b))
+#endif
+#endif
+
+#ifndef MAX
+#ifdef __GNUC__
+#define MAX(a,b) ({ typeof(a) _a = (a); typeof(b) _b = (b); _a > _b ? _a : _b; })
+#else
+#define MAX(a,b) (((a) > (b)) ? (a) : (b))
+#endif
+#endif
+
+/**
+ * Define TRUE/FALSE
+ * defined in several header files
+ */
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+/*
+ * Define offsetof
+ */
+/* DONE (stddef.h) */
+
+#endif
diff --git a/media/webrtc/signaling/src/sdp/sipcc/cpr_string.c b/media/webrtc/signaling/src/sdp/sipcc/cpr_string.c
new file mode 100644
index 000000000..c210c3971
--- /dev/null
+++ b/media/webrtc/signaling/src/sdp/sipcc/cpr_string.c
@@ -0,0 +1,272 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <stdarg.h>
+
+#include "mozilla/Assertions.h"
+#include "cpr_types.h"
+#include "cpr_string.h"
+#include "cpr_strings.h"
+
+/* From cpr_stdlib.h */
+#include "mozilla/mozalloc.h"
+
+#define cpr_malloc(a) moz_xmalloc(a)
+#define cpr_calloc(a, b) moz_xcalloc(a, b)
+#define cpr_realloc(a, b) moz_xrealloc(a, b)
+#define cpr_free(a) free(a)
+
+
+/**
+ * sstrncpy
+ *
+ * This is Cisco's *safe* version of strncpy. The string will always
+ * be NUL terminated (which is not ANSI compliant).
+ *
+ * Parameters: s1 - first string
+ * s2 - second string
+ *              max - maximum length in octets to copy
+ *
+ * Return:     Number of characters copied, excluding the NUL terminator
+ *
+ * Remarks: Modified to be explicitly safe for all inputs.
+ * Also return the number of characters copied excluding the
+ * NUL terminator vs. the original string s1. This simplifies
+ * code where sstrncat functions follow.
+ */
+unsigned long
+sstrncpy (char *dst, const char *src, unsigned long max)
+{
+ unsigned long cnt = 0;
+
+ if (dst == NULL) {
+ return 0;
+ }
+
+ if (src) {
+ while ((max-- > 1) && (*src)) {
+ *dst = *src;
+ dst++;
+ src++;
+ cnt++;
+ }
+ }
+
+#if defined(CPR_SSTRNCPY_PAD)
+ /*
+ * To be equivalent to the TI compiler version
+ * v2.01, SSTRNCPY_PAD needs to be defined
+ */
+ while (max-- > 1) {
+ *dst = '\0';
+ dst++;
+ }
+#endif
+ *dst = '\0';
+
+ return cnt;
+}
+
+/**
+ * sstrncat
+ *
+ * This is Cisco's *safe* version of strncat. The string will always
+ * be NUL terminated (which is not ANSI compliant).
+ *
+ * Parameters: s1 - first string
+ * s2 - second string
+ * max - maximum length in octets to concatenate
+ *
+ * Return: Pointer to the *end* of the string
+ *
+ * Remarks: Modified to be explicitly safe for all inputs.
+ * Also return the end vs. the beginning of the string s1
+ * which is useful for multiple sstrncat calls.
+ */
+char *
+sstrncat (char *s1, const char *s2, unsigned long max)
+{
+ if (s1 == NULL)
+ return (char *) NULL;
+
+ while (*s1)
+ s1++;
+
+ if (s2) {
+ while ((max-- > 1) && (*s2)) {
+ *s1 = *s2;
+ s1++;
+ s2++;
+ }
+ }
+ *s1 = '\0';
+
+ return s1;
+}
+
+/*
+ * flex_string
+ */
+
+/*
+ * flex_string_init
+ *
+ * Not thread-safe
+ */
+void flex_string_init(flex_string *fs) {
+ fs->buffer_length = FLEX_STRING_CHUNK_SIZE;
+ fs->string_length = 0;
+ fs->buffer = cpr_malloc(fs->buffer_length);
+ fs->buffer[0] = '\0';
+}
+
+/*
+ * flex_string_free
+ *
+ * Not thread-safe
+ */
+void flex_string_free(flex_string *fs) {
+ fs->buffer_length = 0;
+ fs->string_length = 0;
+ cpr_free(fs->buffer);
+ fs->buffer = NULL;
+}
+
+/* For sanity check before alloc */
+#define FLEX_STRING_MAX_SIZE (10 * 1024 * 1024) /* 10MB */
+
+/*
+ * flex_string_check_alloc
+ *
+ * Allocate enough chunks to hold the new minimum size.
+ *
+ * Not thread-safe
+ */
+void flex_string_check_alloc(flex_string *fs, size_t new_min_length) {
+ if (new_min_length > fs->buffer_length) {
+ /* Oversize, allocate more */
+
+ /* Sanity check on allocation size */
+ if (new_min_length > FLEX_STRING_MAX_SIZE) {
+ MOZ_CRASH();
+ }
+
+ /* Alloc to nearest chunk */
+ fs->buffer_length = (((new_min_length - 1) / FLEX_STRING_CHUNK_SIZE) + 1) * FLEX_STRING_CHUNK_SIZE;
+
+ fs->buffer = cpr_realloc(fs->buffer, fs->buffer_length);
+ }
+}
+
+/*
+ * flex_string_append
+ *
+ * Not thread-safe
+ */
+void flex_string_append(flex_string *fs, const char *more) {
+ fs->string_length += strlen(more);
+
+ flex_string_check_alloc(fs, fs->string_length + 1);
+
+ sstrncat(fs->buffer, more, fs->buffer_length - strlen(fs->buffer));
+}
+
+/*
+ * va_copy is part of the C99 spec but MSVC doesn't have it.
+ */
+#ifndef va_copy
+#define va_copy(d,s) ((d) = (s))
+#endif
+
+/*
+ * flex_string_vsprintf
+ *
+ * Not thread-safe
+ */
+void flex_string_vsprintf(flex_string *fs, const char *format, va_list original_ap) {
+ va_list ap;
+ int vsnprintf_result;
+
+ va_copy(ap, original_ap);
+ vsnprintf_result = vsnprintf(fs->buffer + fs->string_length, fs->buffer_length - fs->string_length, format, ap);
+ va_end(ap);
+
+ /* Special case just for Windows where vsnprintf is broken
+ and returns -1 if buffer too large unless you size it 0. */
+ if (vsnprintf_result < 0) {
+ va_copy(ap, original_ap);
+ vsnprintf_result = vsnprintf(NULL, 0, format, ap);
+ va_end(ap);
+ }
+
+ if (fs->string_length + vsnprintf_result >= fs->buffer_length) {
+ /* Buffer overflow, resize */
+ flex_string_check_alloc(fs, fs->string_length + vsnprintf_result + 1);
+
+ /* Try again with new buffer */
+ va_copy(ap, original_ap);
+ vsnprintf_result = vsnprintf(fs->buffer + fs->string_length, fs->buffer_length - fs->string_length, format, ap);
+ va_end(ap);
+ MOZ_ASSERT(vsnprintf_result > 0 &&
+ (size_t)vsnprintf_result < (fs->buffer_length - fs->string_length));
+ }
+
+ if (vsnprintf_result > 0) {
+ fs->string_length += vsnprintf_result;
+ }
+}
+
+/*
+ * flex_string_sprintf
+ *
+ * Not thread-safe
+ */
+void flex_string_sprintf(flex_string *fs, const char *format, ...) {
+ va_list ap;
+
+ va_start(ap, format);
+ flex_string_vsprintf(fs, format, ap);
+ va_end(ap);
+}
+
+
+
+/* From cpr_linux_string.c */
+/**
+ * cpr_strdup
+ *
+ * @brief The CPR wrapper for strdup
+
+ * The cpr_strdup shall return a pointer to a new string, which is a duplicate
+ * of the string pointed to by "str" argument. A null pointer is returned if the
+ * new string cannot be created.
+ *
+ * @param[in] str - The string that needs to be duplicated
+ *
+ * @return The duplicated string or NULL in case of no memory
+ *
+ */
+char *
+cpr_strdup (const char *str)
+{
+ char *dup;
+ size_t len;
+
+ if (!str) {
+ return (char *) NULL;
+ }
+
+ len = strlen(str);
+ if (len == 0) {
+ return (char *) NULL;
+ }
+ len++;
+
+ dup = cpr_malloc(len * sizeof(char));
+ if (!dup) {
+ return (char *) NULL;
+ }
+ (void) memcpy(dup, str, len);
+ return dup;
+}
diff --git a/media/webrtc/signaling/src/sdp/sipcc/cpr_string.h b/media/webrtc/signaling/src/sdp/sipcc/cpr_string.h
new file mode 100644
index 000000000..de6b1cc8a
--- /dev/null
+++ b/media/webrtc/signaling/src/sdp/sipcc/cpr_string.h
@@ -0,0 +1,139 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _CPR_STRING_H_
+#define _CPR_STRING_H_
+
+#include <stdarg.h>
+
+#include "cpr_types.h"
+#include "cpr_strings.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * sstrncpy
+ *
+ * @brief The CPR wrapper for strncpy
+ *
+ * This is Cisco's *safe* version of strncpy. The string will always
+ * be NUL terminated (which is not ANSI compliant).
+ *
+ * @param[in] dst - The destination string
+ * @param[in] src - The source
+ * @param[in] max - maximum length in octets to copy
+ *
+ * @return Number of characters copied, excluding the NUL terminator
+ *
+ * @note Modified to be explicitly safe for all inputs.
+ * Also return the number of characters copied excluding the
+ * NUL terminator vs. the original string s1. This simplifies
+ * code where sstrncat functions follow.
+ */
+unsigned long
+sstrncpy(char *dst, const char *src, unsigned long max);
+
+
+/**
+ * sstrncat
+ *
+ * @brief The CPR wrapper for strncat
+ *
+ * This is Cisco's *safe* version of strncat. The string will always
+ * be NUL terminated (which is not ANSI compliant).
+ *
+ * @param[in] s1 - first string
+ * @param[in] s2 - second string
+ * @param[in] max - maximum length in octets to concatenate
+ *
+ * @return Pointer to the @b end of the string
+ *
+ * @note Modified to be explicitly safe for all inputs.
+ * Also return the end vs. the beginning of the string s1
+ * which is useful for multiple sstrncat calls.
+ */
+char *
+sstrncat(char *s1, const char *s2, unsigned long max);
+
+
+/*
+ * flex_string
+ */
+#define FLEX_STRING_CHUNK_SIZE 256
+
+typedef struct {
+ char *buffer;
+ size_t buffer_length;
+ size_t string_length;
+} flex_string;
+
+/*
+ * flex_string_init
+ *
+ * Not thread-safe
+ */
+void flex_string_init(flex_string *fs);
+
+/*
+ * flex_string_free
+ *
+ * Not thread-safe
+ */
+void flex_string_free(flex_string *fs);
+
+/*
+ * flex_string_check_alloc
+ *
+ * Allocate enough chunks to hold the new minimum size.
+ *
+ * Not thread-safe
+ */
+void flex_string_check_alloc(flex_string *fs, size_t new_min_length);
+
+/*
+ * flex_string_append
+ *
+ * Not thread-safe
+ */
+void flex_string_append(flex_string *fs, const char *more);
+
+/*
+ * flex_string_vsprintf
+ *
+ * Not thread-safe
+ */
+void flex_string_vsprintf(flex_string *fs, const char *format, va_list original_ap);
+
+/*
+ * flex_string_sprintf
+ *
+ * Not thread-safe
+ */
+void flex_string_sprintf(flex_string *fs, const char *format, ...);
+
+
+/* From cpr_linux_string.h */
+/* cpr_strdup
+ *
+ * @brief The CPR wrapper for strdup
+
+ * The cpr_strdup shall return a pointer to a new string, which is a duplicate
+ * of the string pointed to by "str" argument. A null pointer is returned if the
+ * new string cannot be created.
+ *
+ * @param[in] str - The string that needs to be duplicated
+ *
+ * @return The duplicated string or NULL in case of no memory
+ *
+ */
+char *
+cpr_strdup(const char *str);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/media/webrtc/signaling/src/sdp/sipcc/cpr_strings.h b/media/webrtc/signaling/src/sdp/sipcc/cpr_strings.h
new file mode 100644
index 000000000..2d18d4638
--- /dev/null
+++ b/media/webrtc/signaling/src/sdp/sipcc/cpr_strings.h
@@ -0,0 +1,22 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _CPR_STRINGS_H_
+#define _CPR_STRINGS_H_
+
+#include "cpr_types.h"
+
+#include <string.h>
+
+#if defined(_MSC_VER)
+#define cpr_strcasecmp _stricmp
+#define cpr_strncasecmp _strnicmp
+#else // _MSC_VER
+
+#define cpr_strcasecmp strcasecmp
+#define cpr_strncasecmp strncasecmp
+
+#endif // _MSC_VER
+
+#endif
diff --git a/media/webrtc/signaling/src/sdp/sipcc/cpr_types.h b/media/webrtc/signaling/src/sdp/sipcc/cpr_types.h
new file mode 100644
index 000000000..808067e61
--- /dev/null
+++ b/media/webrtc/signaling/src/sdp/sipcc/cpr_types.h
@@ -0,0 +1,126 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _CPR_TYPES_H_
+#define _CPR_TYPES_H_
+
+#if defined SIP_OS_LINUX
+#include "cpr_linux_types.h"
+#elif defined SIP_OS_WINDOWS
+#include "cpr_win_types.h"
+#elif defined SIP_OS_OSX
+#include "cpr_darwin_types.h"
+#else
+// No platform macro defined: fall through without OS-specific types ("#error" intentionally disabled)
+//#error "Unsupported platform"
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * CPR Return Codes
+ */
+typedef enum
+{
+ CPR_SUCCESS,
+ CPR_FAILURE
+} cpr_status_e;
+typedef cpr_status_e cprRC_t;
+
+/*
+ * IPv4 address structure
+ */
+typedef uint32_t cpr_in_addr_t;
+
+struct in_addr_s
+{
+#ifdef s_addr
+ /* can occur with Windows winsock.h */
+ union {
+ struct {
+ unsigned char s_b1, s_b2, sb_b3, s_b4;
+ } S_un_b;
+ cpr_in_addr_t S_addr;
+ } S_un;
+#else
+ cpr_in_addr_t s_addr;
+#endif
+};
+
+/*
+ * IPv6 address structure
+ */
+typedef struct
+{
+ union
+ {
+ uint8_t base8[16];
+ uint16_t base16[8];
+ uint32_t base32[4];
+ } addr;
+} cpr_in6_addr_t;
+
+#ifndef s6_addr
+#define s6_addr addr.base8
+#endif
+#ifndef s6_addr16
+#define s6_addr16 addr.base16
+#endif
+#ifndef s6_addr32
+#define s6_addr32 addr.base32
+#endif
+
+typedef enum
+{
+ CPR_IP_ADDR_INVALID=0,
+ CPR_IP_ADDR_IPV4,
+ CPR_IP_ADDR_IPV6
+} cpr_ip_type;
+
+typedef enum
+{
+ CPR_IP_MODE_IPV4 = 0,
+ CPR_IP_MODE_IPV6,
+ CPR_IP_MODE_DUAL
+}
+cpr_ip_mode_e;
+/*
+ * IP address structure
+ */
+typedef struct
+{
+ cpr_ip_type type;
+ union
+ {
+ cpr_in_addr_t ip4;
+ cpr_in6_addr_t ip6;
+ } u;
+} cpr_ip_addr_t;
+
+extern const cpr_ip_addr_t ip_addr_invalid;
+
+#define MAX_IPADDR_STR_LEN 48
+
+
+#define CPR_IP_ADDR_INIT(a) a.type = CPR_IP_ADDR_INVALID;
+
+/*
+ * !!! NOTE !!!
+ *
+ * The strings of type string_t are actually very special blocks
+ * of memory that have a "hidden" header block immediately preceding
+ * the pointer. You MUST use the functions in string_lib.c to
+ * create, manipulate, destroy, copy, or otherwise work with these
+ * strings.
+ */
+
+typedef const char *string_t;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/media/webrtc/signaling/src/sdp/sipcc/cpr_win_types.h b/media/webrtc/signaling/src/sdp/sipcc/cpr_win_types.h
new file mode 100644
index 000000000..c4dfa0b72
--- /dev/null
+++ b/media/webrtc/signaling/src/sdp/sipcc/cpr_win_types.h
@@ -0,0 +1,71 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _CPR_WIN_TYPES_H_
+#define _CPR_WIN_TYPES_H_
+
+#include <sys/types.h>
+
+#ifndef WIN32_LEAN_AND_MEAN
+#define WIN32_LEAN_AND_MEAN
+#endif
+
+#include <windows.h>
+#ifdef SIPCC_BUILD
+#include <winsock2.h>
+#include <ws2tcpip.h>
+#include <windef.h>
+#endif
+#include <stddef.h>
+#include <stdlib.h>
+
+/*
+ * Define POSIX types
+ * [u]int[8,16,32,64]_t
+ */
+#include <stdint.h>
+
+/*
+ * Define boolean
+ * in windef.h: BOOL => int
+ */
+typedef uint8_t boolean;
+
+/*
+ * Define ssize_t if required. The MinGW W32API already defines ssize_t
+ * in <sys/types.h> (protected by _SSIZE_T_) so this will only apply to
+ * Microsoft SDK.
+ *
+ * NOTE: size_t should already be declared by both the MinGW and Microsoft
+ * SDKs.
+ */
+#ifndef _SSIZE_T_
+#define _SSIZE_T_
+typedef int ssize_t;
+#endif
+
+/*
+ * Define pid_t.
+ */
+typedef int pid_t;
+
+/*
+ * Define min/max
+ * defined in windef.h as lowercase
+ */
+#ifndef MIN
+#define MIN min
+#endif
+
+#ifndef MAX
+#define MAX max
+#endif
+
+/*
+ * Define NULL
+ * defined in numerous header files
+ */
+/* DONE defined in windef.h */
+
+#endif // _CPR_WIN_TYPES_H_
diff --git a/media/webrtc/signaling/src/sdp/sipcc/sdp.h b/media/webrtc/signaling/src/sdp/sipcc/sdp.h
new file mode 100644
index 000000000..9bd2ce132
--- /dev/null
+++ b/media/webrtc/signaling/src/sdp/sipcc/sdp.h
@@ -0,0 +1,1794 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _SIPCC_SDP_H_
+#define _SIPCC_SDP_H_
+
+#include "sdp_os_defs.h"
+#include "ccsdp.h"
+
+/* SDP Defines */
+
+/* The following defines are used to indicate params that are specified
+ * as the choose parameter or parameters that are invalid. These can
+ * be used where the value required is really a uint16_t, but is represented
+ * by an int32_t.
+ */
+#define SDP_CHOOSE_PARAM (-1)
+#define SDP_SESSION_LEVEL 0xFFFF
+
+#define UNKNOWN_CRYPTO_SUITE "UNKNOWN_CRYPTO_SUITE"
+#define AES_CM_128_HMAC_SHA1_32 "AES_CM_128_HMAC_SHA1_32"
+#define AES_CM_128_HMAC_SHA1_80 "AES_CM_128_HMAC_SHA1_80"
+#define F8_128_HMAC_SHA1_80 "F8_128_HMAC_SHA1_80"
+
+/* Pulled in from rtp_defs.h. */
+#define GET_DYN_PAYLOAD_TYPE_VALUE(a) ((a & 0XFF00) ? ((a & 0XFF00) >> 8) : a)
+#define SET_PAYLOAD_TYPE_WITH_DYNAMIC(a,b) ((a << 8) | b)
+
+/*
+ * SDP_SRTP_MAX_KEY_SIZE_BYTES
+ * Maximum size for a SRTP Master Key in bytes.
+ */
+#define SDP_SRTP_MAX_KEY_SIZE_BYTES 16
+/*
+ * SDP_SRTP_MAX_SALT_SIZE_BYTES
+ * Maximum size for a SRTP Master Salt in bytes.
+ */
+#define SDP_SRTP_MAX_SALT_SIZE_BYTES 14
+/*
+ * SDP_SRTP_MAX_MKI_SIZE_BYTES
+ * Maximum size for a SRTP Master Key Index in bytes.
+ */
+#define SDP_SRTP_MAX_MKI_SIZE_BYTES 4
+
+/* Max number of characters for Lifetime */
+#define SDP_SRTP_MAX_LIFETIME_BYTES 16
+
+#define SDP_SDESCRIPTIONS_KEY_SIZE_UNKNOWN 0
+#define SDP_SRTP_CRYPTO_SELECTION_FLAGS_UNKNOWN 0
+
+/* Max number of fmtp redundant encodings */
+#define SDP_FMTP_MAX_REDUNDANT_ENCODINGS 128
+
+/*
+ * SRTP_CONTEXT_SET_*
+ * Set a SRTP Context field flag
+ */
+#define SDP_SRTP_ENCRYPT_MASK 0x00000001
+#define SDP_SRTP_AUTHENTICATE_MASK 0x00000002
+#define SDP_SRTCP_ENCRYPT_MASK 0x00000004
+#define SDP_SRTCP_SSRC_MASK 0x20000000
+#define SDP_SRTCP_ROC_MASK 0x10000000
+#define SDP_SRTCP_KDR_MASK 0x08000000
+#define SDP_SRTCP_KEY_MASK 0x80000000
+#define SDP_SRTCP_SALT_MASK 0x40000000
+
+#define SDP_SRTP_CONTEXT_SET_SSRC(cw) ((cw) |= SDP_SRTCP_SSRC_MASK)
+#define SDP_SRTP_CONTEXT_SET_ROC(cw) ((cw) |= SDP_SRTCP_ROC_MASK)
+#define SDP_SRTP_CONTEXT_SET_KDR(cw) ((cw) |= SDP_SRTCP_KDR_MASK)
+#define SDP_SRTP_CONTEXT_SET_MASTER_KEY(cw) ((cw) |= SDP_SRTCP_KEY_MASK)
+#define SDP_SRTP_CONTEXT_SET_MASTER_SALT(cw) ((cw) |= SDP_SRTCP_SALT_MASK)
+#define SDP_SRTP_CONTEXT_SET_ENCRYPT_AUTHENTICATE(cw) \
+ ((cw) |= (SDP_SRTP_ENCRYPT_MASK | SDP_SRTP_AUTHENTICATE_MASK | \
+ SDP_SRTCP_ENCRYPT_MASK))
+#define SDP_SRTP_CONTEXT_RESET_SSRC(cw) ((cw) &= ~(SDP_SRTCP_SSRC_MASK))
+#define SDP_SRTP_CONTEXT_RESET_ROC(cw) ((cw) &= ~(SDP_SRTCP_ROC_MASK))
+#define SDP_SRTP_CONTEXT_RESET_KDR(cw) ((cw) &= ~(SDP_SRTCP_KDR_MASK))
+#define SDP_CONTEXT_RESET_MASTER_KEY(cw) ((cw) &= ~(SDP_SRTCP_KEY_MASK))
+#define SDP_CONTEXT_RESET_MASTER_SALT(cw) ((cw) &= ~(SDP_SRTCP_SALT_MASK))
+#define SDP_EXTMAP_AUDIO_LEVEL "urn:ietf:params:rtp-hdrext:ssrc-audio-level"
+
+/* SDP Enum Types */
+typedef enum {
+ SDP_DEBUG_TRACE,
+ SDP_DEBUG_WARNINGS,
+ SDP_DEBUG_ERRORS,
+ SDP_MAX_DEBUG_TYPES
+} sdp_debug_e;
+
+typedef enum {
+ SDP_CHOOSE_CONN_ADDR,
+ SDP_CHOOSE_PORTNUM,
+ SDP_MAX_CHOOSE_PARAMS
+} sdp_choose_param_e;
+
+
+/* Token Lines - these must be in the same order they should
+ * appear in an SDP.
+ */
+typedef enum {
+ SDP_TOKEN_V = 0,
+ SDP_TOKEN_O,
+ SDP_TOKEN_S,
+ SDP_TOKEN_I,
+ SDP_TOKEN_U,
+ SDP_TOKEN_E,
+ SDP_TOKEN_P,
+ SDP_TOKEN_C,
+ SDP_TOKEN_B,
+ SDP_TOKEN_T,
+ SDP_TOKEN_R,
+ SDP_TOKEN_Z,
+ SDP_TOKEN_K,
+ SDP_TOKEN_A,
+ SDP_TOKEN_M,
+ SDP_MAX_TOKENS
+} sdp_token_e;
+
+/* Media Types */
+typedef enum {
+ SDP_MEDIA_AUDIO = 0,
+ SDP_MEDIA_VIDEO,
+ SDP_MEDIA_APPLICATION,
+ SDP_MEDIA_DATA,
+ SDP_MEDIA_CONTROL,
+ SDP_MEDIA_NAS_RADIUS,
+ SDP_MEDIA_NAS_TACACS,
+ SDP_MEDIA_NAS_DIAMETER,
+ SDP_MEDIA_NAS_L2TP,
+ SDP_MEDIA_NAS_LOGIN,
+ SDP_MEDIA_NAS_NONE,
+ SDP_MEDIA_TEXT,
+ SDP_MEDIA_IMAGE,
+ SDP_MAX_MEDIA_TYPES,
+ SDP_MEDIA_UNSUPPORTED,
+ SDP_MEDIA_INVALID
+} sdp_media_e;
+
+
+/* Connection Network Type */
+typedef enum {
+ SDP_NT_INTERNET = 0, /* 0 -> IP - In SDP "IN" is defined */
+ /* to mean "Internet" */
+ SDP_NT_ATM, /* 1 -> ATM */
+ SDP_NT_FR, /* 2 -> FRAME RELAY */
+ SDP_NT_LOCAL, /* 3 -> local */
+ SDP_MAX_NETWORK_TYPES,
+ SDP_NT_UNSUPPORTED,
+ SDP_NT_INVALID
+} sdp_nettype_e;
+
+
+/* Address Type */
+typedef enum {
+ SDP_AT_IP4 = 0, /* 0 -> IP Version 4 (IP4) */
+ SDP_AT_IP6, /* 1 -> IP Version 6 (IP6) */
+ SDP_AT_NSAP, /* 2 -> 20 byte NSAP address */
+ SDP_AT_EPN, /* 3 -> 32 bytes of endpoint name */
+ SDP_AT_E164, /* 4 -> 15 digit decimal number addr */
+ SDP_AT_GWID, /* 5 -> Private gw id. ASCII string */
+ SDP_MAX_ADDR_TYPES,
+ SDP_AT_UNSUPPORTED,
+ SDP_AT_FQDN,
+ SDP_AT_INVALID
+} sdp_addrtype_e;
+
+
+/* Transport Types */
+
+#define SDP_MAX_PROFILES 3
+
+typedef enum {
+ SDP_TRANSPORT_RTPAVP = 0,
+ SDP_TRANSPORT_UDP,
+ SDP_TRANSPORT_UDPTL,
+ SDP_TRANSPORT_CES10,
+ SDP_TRANSPORT_LOCAL,
+ SDP_TRANSPORT_AAL2_ITU,
+ SDP_TRANSPORT_AAL2_ATMF,
+ SDP_TRANSPORT_AAL2_CUSTOM,
+ SDP_TRANSPORT_AAL1AVP,
+ SDP_TRANSPORT_UDPSPRT,
+ SDP_TRANSPORT_RTPSAVP,
+ SDP_TRANSPORT_TCP,
+ SDP_TRANSPORT_RTPSAVPF,
+ SDP_TRANSPORT_DTLSSCTP,
+ SDP_TRANSPORT_RTPAVPF,
+ SDP_TRANSPORT_UDPTLSRTPSAVP,
+ SDP_TRANSPORT_UDPTLSRTPSAVPF,
+ SDP_TRANSPORT_TCPTLSRTPSAVP,
+ SDP_TRANSPORT_TCPTLSRTPSAVPF,
+ SDP_MAX_TRANSPORT_TYPES,
+ SDP_TRANSPORT_UNSUPPORTED,
+ SDP_TRANSPORT_INVALID
+} sdp_transport_e;
+
+
+/* Encryption KeyType */
+typedef enum {
+ SDP_ENCRYPT_CLEAR, /* 0 -> Key given in the clear */
+ SDP_ENCRYPT_BASE64, /* 1 -> Base64 encoded key */
+ SDP_ENCRYPT_URI, /* 2 -> Ptr to URI */
+ SDP_ENCRYPT_PROMPT, /* 3 -> No key included, prompt user */
+ SDP_MAX_ENCRYPT_TYPES,
+ SDP_ENCRYPT_UNSUPPORTED,
+ SDP_ENCRYPT_INVALID
+} sdp_encrypt_type_e;
+
+
+/* Known string payload types */
+typedef enum {
+ SDP_PAYLOAD_T38,
+ SDP_PAYLOAD_XTMR,
+ SDP_PAYLOAD_T120,
+ SDP_MAX_STRING_PAYLOAD_TYPES,
+ SDP_PAYLOAD_UNSUPPORTED,
+ SDP_PAYLOAD_INVALID
+} sdp_payload_e;
+
+
+/* Payload type indicator */
+typedef enum {
+ SDP_PAYLOAD_NUMERIC,
+ SDP_PAYLOAD_ENUM
+} sdp_payload_ind_e;
+
+
+/* Image payload types */
+typedef enum {
+ SDP_PORT_NUM_ONLY, /* <port> or '$' */
+ SDP_PORT_NUM_COUNT, /* <port>/<number of ports> */
+ SDP_PORT_VPI_VCI, /* <vpi>/<vci> */
+ SDP_PORT_VCCI, /* <vcci> */
+ SDP_PORT_NUM_VPI_VCI, /* <port>/<vpi>/<vci> */
+ SDP_PORT_VCCI_CID, /* <vcci>/<cid> or '$'/'$' */
+ SDP_PORT_NUM_VPI_VCI_CID, /* <port>/<vpi>/<vci>/<cid> */
+ SDP_MAX_PORT_FORMAT_TYPES,
+ SDP_PORT_FORMAT_INVALID
+} sdp_port_format_e;
+
+
+/* Fmtp attribute format Types */
+typedef enum {
+ SDP_FMTP_NTE,
+ SDP_FMTP_CODEC_INFO,
+ SDP_FMTP_MODE,
+ SDP_FMTP_DATACHANNEL,
+ SDP_FMTP_UNKNOWN_TYPE,
+ SDP_FMTP_MAX_TYPE
+} sdp_fmtp_format_type_e;
+
+
+/* T.38 Rate Mgmt Types */
+typedef enum {
+ SDP_T38_LOCAL_TCF,
+ SDP_T38_TRANSFERRED_TCF,
+ SDP_T38_UNKNOWN_RATE,
+ SDP_T38_MAX_RATES
+} sdp_t38_ratemgmt_e;
+
+
+/* T.38 udp EC Types */
+typedef enum {
+ SDP_T38_UDP_REDUNDANCY,
+ SDP_T38_UDP_FEC,
+ SDP_T38_UDPEC_UNKNOWN,
+ SDP_T38_MAX_UDPEC
+} sdp_t38_udpec_e;
+
+/* Bitmaps for manipulating sdp_direction_e */
+typedef enum {
+ SDP_DIRECTION_FLAG_SEND=0x01,
+ SDP_DIRECTION_FLAG_RECV=0x02
+} sdp_direction_flag_e;
+
+/* Media flow direction */
+typedef enum {
+ SDP_DIRECTION_INACTIVE = 0,
+ SDP_DIRECTION_SENDONLY = SDP_DIRECTION_FLAG_SEND,
+ SDP_DIRECTION_RECVONLY = SDP_DIRECTION_FLAG_RECV,
+ SDP_DIRECTION_SENDRECV = SDP_DIRECTION_FLAG_SEND | SDP_DIRECTION_FLAG_RECV,
+ SDP_MAX_QOS_DIRECTIONS
+} sdp_direction_e;
+
+#define SDP_DIRECTION_PRINT(arg) \
+ (((sdp_direction_e)(arg)) == SDP_DIRECTION_INACTIVE ? "SDP_DIRECTION_INACTIVE " : \
+ ((sdp_direction_e)(arg)) == SDP_DIRECTION_SENDONLY ? "SDP_DIRECTION_SENDONLY": \
+ ((sdp_direction_e)(arg)) == SDP_DIRECTION_RECVONLY ? "SDP_DIRECTION_RECVONLY ": \
+ ((sdp_direction_e)(arg)) == SDP_DIRECTION_SENDRECV ? " SDP_DIRECTION_SENDRECV": "SDP_MAX_QOS_DIRECTIONS")
+
+
+/* QOS Strength tag */
+typedef enum {
+ SDP_QOS_STRENGTH_OPT,
+ SDP_QOS_STRENGTH_MAND,
+ SDP_QOS_STRENGTH_SUCC,
+ SDP_QOS_STRENGTH_FAIL,
+ SDP_QOS_STRENGTH_NONE,
+ SDP_MAX_QOS_STRENGTH,
+ SDP_QOS_STRENGTH_UNKNOWN
+} sdp_qos_strength_e;
+
+
+/* QOS direction */
+typedef enum {
+ SDP_QOS_DIR_SEND,
+ SDP_QOS_DIR_RECV,
+ SDP_QOS_DIR_SENDRECV,
+ SDP_QOS_DIR_NONE,
+ SDP_MAX_QOS_DIR,
+ SDP_QOS_DIR_UNKNOWN
+} sdp_qos_dir_e;
+
+/* QoS Status types */
+typedef enum {
+ SDP_QOS_LOCAL,
+ SDP_QOS_REMOTE,
+ SDP_QOS_E2E,
+ SDP_MAX_QOS_STATUS_TYPES,
+ SDP_QOS_STATUS_TYPE_UNKNOWN
+} sdp_qos_status_types_e;
+
+/* QoS Status types */
+typedef enum {
+ SDP_CURR_QOS_TYPE,
+ SDP_CURR_UNKNOWN_TYPE,
+ SDP_MAX_CURR_TYPES
+} sdp_curr_type_e;
+
+/* QoS Status types */
+typedef enum {
+ SDP_DES_QOS_TYPE,
+ SDP_DES_UNKNOWN_TYPE,
+ SDP_MAX_DES_TYPES
+} sdp_des_type_e;
+
+/* QoS Status types */
+typedef enum {
+ SDP_CONF_QOS_TYPE,
+ SDP_CONF_UNKNOWN_TYPE,
+ SDP_MAX_CONF_TYPES
+} sdp_conf_type_e;
+
+
+/* Named event range result values. */
+typedef enum {
+ SDP_NO_MATCH,
+ SDP_PARTIAL_MATCH,
+ SDP_FULL_MATCH
+} sdp_ne_res_e;
+
+/* Fmtp attribute parameters for audio/video codec information */
+typedef enum {
+
+ /* mainly for audio codecs */
+ SDP_ANNEX_A, /* 0 */
+ SDP_ANNEX_B,
+ SDP_BITRATE,
+
+ /* for video codecs */
+ SDP_QCIF,
+ SDP_CIF,
+ SDP_MAXBR,
+ SDP_SQCIF,
+ SDP_CIF4,
+ SDP_CIF16,
+ SDP_CUSTOM,
+ SDP_PAR,
+ SDP_CPCF,
+ SDP_BPP,
+ SDP_HRD,
+ SDP_PROFILE,
+ SDP_LEVEL,
+ SDP_INTERLACE,
+
+ /* H.264 related */
+ SDP_PROFILE_LEVEL_ID, /* 17 */
+ SDP_PARAMETER_SETS,
+ SDP_PACKETIZATION_MODE,
+ SDP_INTERLEAVING_DEPTH,
+ SDP_DEINT_BUF_REQ,
+ SDP_MAX_DON_DIFF,
+ SDP_INIT_BUF_TIME,
+
+ SDP_MAX_MBPS,
+ SDP_MAX_FS,
+ SDP_MAX_CPB,
+ SDP_MAX_DPB,
+ SDP_MAX_BR,
+ SDP_REDUNDANT_PIC_CAP,
+ SDP_DEINT_BUF_CAP,
+ SDP_MAX_RCMD_NALU_SIZE,
+
+ SDP_PARAMETER_ADD,
+
+ /* Annexes - begin */
+ /* Some require special handling as they don't have token=token format*/
+ SDP_ANNEX_D,
+ SDP_ANNEX_F,
+ SDP_ANNEX_I,
+ SDP_ANNEX_J,
+ SDP_ANNEX_T,
+
+ /* These annexes have token=token format */
+ SDP_ANNEX_K,
+ SDP_ANNEX_N,
+ SDP_ANNEX_P,
+
+ SDP_MODE,
+ SDP_LEVEL_ASYMMETRY_ALLOWED,
+ SDP_MAX_AVERAGE_BIT_RATE,
+ SDP_USED_TX,
+ SDP_STEREO,
+ SDP_USE_IN_BAND_FEC,
+ SDP_MAX_CODED_AUDIO_BW,
+ SDP_CBR,
+ SDP_MAX_FR,
+ SDP_MAX_PLAYBACK_RATE,
+ SDP_MAX_FMTP_PARAM,
+ SDP_FMTP_PARAM_UNKNOWN
+} sdp_fmtp_codec_param_e;
+
+/* Fmtp attribute parameters values for
+ fmtp attribute parameters which convey codec
+ information */
+
+typedef enum {
+ SDP_YES,
+ SDP_NO,
+ SDP_MAX_FMTP_PARAM_VAL,
+ SDP_FMTP_PARAM_UNKNOWN_VAL
+} sdp_fmtp_codec_param_val_e;
+
+/* silenceSupp suppPref */
+typedef enum {
+ SDP_SILENCESUPP_PREF_STANDARD,
+ SDP_SILENCESUPP_PREF_CUSTOM,
+ SDP_SILENCESUPP_PREF_NULL, /* "-" */
+ SDP_MAX_SILENCESUPP_PREF,
+ SDP_SILENCESUPP_PREF_UNKNOWN
+} sdp_silencesupp_pref_e;
+
+/* silenceSupp sidUse */
+typedef enum {
+ SDP_SILENCESUPP_SIDUSE_NOSID,
+ SDP_SILENCESUPP_SIDUSE_FIXED,
+ SDP_SILENCESUPP_SIDUSE_SAMPLED,
+ SDP_SILENCESUPP_SIDUSE_NULL, /* "-" */
+ SDP_MAX_SILENCESUPP_SIDUSE,
+ SDP_SILENCESUPP_SIDUSE_UNKNOWN
+} sdp_silencesupp_siduse_e;
+
+typedef enum {
+ SDP_MEDIADIR_ROLE_PASSIVE,
+ SDP_MEDIADIR_ROLE_ACTIVE,
+ SDP_MEDIADIR_ROLE_BOTH,
+ SDP_MEDIADIR_ROLE_REUSE,
+ SDP_MEDIADIR_ROLE_UNKNOWN,
+ SDP_MAX_MEDIADIR_ROLES,
+ SDP_MEDIADIR_ROLE_UNSUPPORTED,
+ SDP_MEDIADIR_ROLE_INVALID
+} sdp_mediadir_role_e;
+
+typedef enum {
+ SDP_GROUP_ATTR_FID,
+ SDP_GROUP_ATTR_LS,
+ SDP_GROUP_ATTR_ANAT,
+ SDP_GROUP_ATTR_BUNDLE,
+ SDP_MAX_GROUP_ATTR_VAL,
+ SDP_GROUP_ATTR_UNSUPPORTED
+} sdp_group_attr_e;
+
+typedef enum {
+ SDP_SRC_FILTER_INCL,
+ SDP_SRC_FILTER_EXCL,
+ SDP_MAX_FILTER_MODE,
+ SDP_FILTER_MODE_NOT_PRESENT
+} sdp_src_filter_mode_e;
+
+typedef enum {
+ SDP_RTCP_UNICAST_MODE_REFLECTION,
+ SDP_RTCP_UNICAST_MODE_RSI,
+ SDP_RTCP_MAX_UNICAST_MODE,
+ SDP_RTCP_UNICAST_MODE_NOT_PRESENT
+} sdp_rtcp_unicast_mode_e;
+
+typedef enum {
+ SDP_CONNECTION_NOT_FOUND = -1,
+ SDP_CONNECTION_NEW = 0,
+ SDP_CONNECTION_EXISTING,
+ SDP_MAX_CONNECTION,
+ SDP_CONNECTION_UNKNOWN
+} sdp_connection_type_e;
+
+/*
+ * sdp_srtp_fec_order_t
+ * This type defines the order in which to perform FEC
+ * (Forward Error Correction) and SRTP Encryption/Authentication.
+ */
+typedef enum sdp_srtp_fec_order_t_ {
+ SDP_SRTP_THEN_FEC, /* upon sending perform SRTP then FEC */
+ SDP_FEC_THEN_SRTP, /* upon sending perform FEC then SRTP */
+ SDP_SRTP_FEC_SPLIT /* upon sending perform SRTP Encryption,
+ * then FEC, then SRTP Authentication */
+} sdp_srtp_fec_order_t;
+
+
+/*
+ * sdp_srtp_crypto_suite_t
+ * Enumeration of the crypto suites supported for MGCP SRTP
+ * package.
+ */
+typedef enum sdp_srtp_crypto_suite_t_ {
+ SDP_SRTP_UNKNOWN_CRYPTO_SUITE = 0,
+ SDP_SRTP_AES_CM_128_HMAC_SHA1_32,
+ SDP_SRTP_AES_CM_128_HMAC_SHA1_80,
+ SDP_SRTP_F8_128_HMAC_SHA1_80,
+ SDP_SRTP_MAX_NUM_CRYPTO_SUITES
+} sdp_srtp_crypto_suite_t;
+
+/*
+ * SDP SRTP crypto suite definition parameters
+ *
+ * SDP_SRTP_<crypto_suite>_KEY_BYTES
+ * The size of a master key for <crypto_suite> in bytes.
+ *
+ * SDP_SRTP_<crypto_suite>_SALT_BYTES
+ * The size of a master salt for <crypto_suite> in bytes.
+ */
+#define SDP_SRTP_AES_CM_128_HMAC_SHA1_32_KEY_BYTES 16
+#define SDP_SRTP_AES_CM_128_HMAC_SHA1_32_SALT_BYTES 14
+#define SDP_SRTP_AES_CM_128_HMAC_SHA1_80_KEY_BYTES 16
+#define SDP_SRTP_AES_CM_128_HMAC_SHA1_80_SALT_BYTES 14
+#define SDP_SRTP_F8_128_HMAC_SHA1_80_KEY_BYTES 16
+#define SDP_SRTP_F8_128_HMAC_SHA1_80_SALT_BYTES 14
+
+/* SDP Defines */
+
+#define SDP_MAX_LONG_STRING_LEN 4096 /* Max len for long SDP strings */
+#define SDP_MAX_STRING_LEN 256 /* Max len for SDP string */
+#define SDP_MAX_SHORT_STRING_LEN 12 /* Max len for a short SDP string */
+#define SDP_MAX_PAYLOAD_TYPES 23 /* Max payload types in m= line */
+#define SDP_TOKEN_LEN 2 /* Len of <token>= */
+#define SDP_CURRENT_VERSION 0 /* Current default SDP version */
+#define SDP_MAX_PORT_PARAMS 4 /* Max m= port params - x/x/x/x */
+#define SDP_MIN_DYNAMIC_PAYLOAD 96 /* Min dynamic payload */
+#define SDP_MAX_DYNAMIC_PAYLOAD 127 /* Max dynamic payload */
+#define SDP_MIN_CIF_VALUE 1 /* applies to all QCIF,CIF,CIF4,CIF16,SQCIF */
+#define SDP_MAX_CIF_VALUE 32 /* applies to all QCIF,CIF,CIF4,CIF16,SQCIF */
+#define SDP_MAX_SRC_ADDR_LIST 1 /* Max source addrs for which filter applies */
+
+
+#define SDP_DEFAULT_PACKETIZATION_MODE_VALUE 0 /* default packetization mode for H.264 */
+#define SDP_MAX_PACKETIZATION_MODE_VALUE 2 /* max packetization mode for H.264 */
+#define SDP_INVALID_PACKETIZATION_MODE_VALUE 255
+
+#define SDP_MAX_LEVEL_ASYMMETRY_ALLOWED_VALUE 1 /* max level asymmetry allowed value for H.264 */
+#define SDP_DEFAULT_LEVEL_ASYMMETRY_ALLOWED_VALUE 0 /* default level asymmetry allowed value for H.264 */
+#define SDP_INVALID_LEVEL_ASYMMETRY_ALLOWED_VALUE 2 /* invalid value for level-asymmetry-allowed param for H.264 */
+
+
+/* Max number of stream ids that can be grouped together */
+#define SDP_MAX_MEDIA_STREAMS 32
+
+#define SDP_UNSUPPORTED "Unsupported"
+#define SDP_MAX_LINE_LEN 256 /* Max len for SDP Line */
+
+#define SDP_MAX_PROFILE_VALUE 10
+#define SDP_MAX_LEVEL_VALUE 100
+#define SDP_MIN_PROFILE_LEVEL_VALUE 0
+#define SDP_MAX_TTL_VALUE 255
+#define SDP_MIN_MCAST_ADDR_HI_BIT_VAL 224
+#define SDP_MAX_MCAST_ADDR_HI_BIT_VAL 239
+
+/* SDP Enum Types */
+
+typedef enum {
+ SDP_ERR_INVALID_CONF_PTR,
+ SDP_ERR_INVALID_SDP_PTR,
+ SDP_ERR_INTERNAL,
+ SDP_MAX_ERR_TYPES
+} sdp_errmsg_e;
+
+/* SDP Structure Definitions */
+
+/* String names of various tokens */
+typedef struct {
+ char *name;
+ uint8_t strlen;
+} sdp_namearray_t;
+
+/* c= line info */
+typedef struct {
+ sdp_nettype_e nettype;
+ sdp_addrtype_e addrtype;
+ char conn_addr[SDP_MAX_STRING_LEN+1];
+ tinybool is_multicast;
+ uint16_t ttl;
+ uint16_t num_of_addresses;
+} sdp_conn_t;
+
+/* t= line info */
+typedef struct sdp_timespec {
+ char start_time[SDP_MAX_STRING_LEN+1];
+ char stop_time[SDP_MAX_STRING_LEN+1];
+ struct sdp_timespec *next_p;
+} sdp_timespec_t;
+
+
+/* k= line info */
+typedef struct sdp_encryptspec {
+ sdp_encrypt_type_e encrypt_type;
+ char encrypt_key[SDP_MAX_STRING_LEN+1];
+} sdp_encryptspec_t;
+
+
+/* FMTP attribute deals with named events in the range of 0-255 as
+ * defined in RFC 2833 */
+#define SDP_MIN_NE_VALUE 0
+#define SDP_MAX_NE_VALUES 256
+#define SDP_NE_BITS_PER_WORD ( sizeof(uint32_t) * 8 )
+#define SDP_NE_NUM_BMAP_WORDS ((SDP_MAX_NE_VALUES + SDP_NE_BITS_PER_WORD - 1)/SDP_NE_BITS_PER_WORD )
+#define SDP_NE_BIT_0 ( 0x00000001 )
+#define SDP_NE_ALL_BITS ( 0xFFFFFFFF )
+
+#define SDP_DEINT_BUF_REQ_FLAG 0x1
+#define SDP_INIT_BUF_TIME_FLAG 0x2
+#define SDP_MAX_RCMD_NALU_SIZE_FLAG 0x4
+#define SDP_DEINT_BUF_CAP_FLAG 0x8
+
+#define SDP_FMTP_UNUSED 0xFFFF
+
+typedef struct sdp_fmtp {
+ uint16_t payload_num;
+ uint32_t maxval; /* maxval optimizes bmap search */
+ uint32_t bmap[ SDP_NE_NUM_BMAP_WORDS ];
+ sdp_fmtp_format_type_e fmtp_format; /* Gives the format type
+ for FMTP attribute*/
+ tinybool annexb_required;
+ tinybool annexa_required;
+
+ tinybool annexa;
+ tinybool annexb;
+ uint32_t bitrate;
+ uint32_t mode;
+
+ /* some OPUS specific fmtp params */
+ uint32_t maxplaybackrate;
+ uint32_t maxaveragebitrate;
+ uint16_t usedtx;
+ uint16_t stereo;
+ uint16_t useinbandfec;
+ char maxcodedaudiobandwidth[SDP_MAX_STRING_LEN+1];
+ uint16_t cbr;
+
+ /* BEGIN - All Video related FMTP parameters */
+ uint16_t qcif;
+ uint16_t cif;
+ uint16_t maxbr;
+ uint16_t sqcif;
+ uint16_t cif4;
+ uint16_t cif16;
+
+ uint16_t custom_x;
+ uint16_t custom_y;
+ uint16_t custom_mpi;
+ /* CUSTOM=360,240,4 implies X-AXIS=360, Y-AXIS=240; MPI=4 */
+ uint16_t par_width;
+ uint16_t par_height;
+ /* PAR=12:11 implies par_width=12, par_height=11 */
+
+ /* CPCF should be a float. IOS does not support float and so it is uint16_t */
+ /* For portable stack, CPCF should be defined as float and the parsing should
+ * be modified accordingly */
+ uint16_t cpcf;
+ uint16_t bpp;
+ uint16_t hrd;
+
+ int16_t profile;
+ int16_t level;
+ tinybool is_interlace;
+
+ /* some more H.264 specific fmtp params */
+ char profile_level_id[SDP_MAX_STRING_LEN+1];
+ char parameter_sets[SDP_MAX_STRING_LEN+1];
+ uint16_t packetization_mode;
+ uint16_t level_asymmetry_allowed;
+ uint16_t interleaving_depth;
+ uint32_t deint_buf_req;
+ uint32_t max_don_diff;
+ uint32_t init_buf_time;
+
+ uint32_t max_mbps;
+ uint32_t max_fs;
+ uint32_t max_fr;
+ uint32_t max_cpb;
+ uint32_t max_dpb;
+ uint32_t max_br;
+ tinybool redundant_pic_cap;
+ uint32_t deint_buf_cap;
+ uint32_t max_rcmd_nalu_size;
+ uint16_t parameter_add;
+
+ tinybool annex_d;
+
+ tinybool annex_f;
+ tinybool annex_i;
+ tinybool annex_j;
+ tinybool annex_t;
+
+ /* H.263 codec requires annex K,N and P to have values */
+ uint16_t annex_k_val;
+ uint16_t annex_n_val;
+
+ /* RFC 5109 Section 4.2 for specifying redundant encodings */
+ uint8_t redundant_encodings[SDP_FMTP_MAX_REDUNDANT_ENCODINGS];
+
+ /* RFC 2833 Section 3.9 (4733) for specifying support DTMF tones:
+ The list of values consists of comma-separated elements, which
+ can be either a single decimal number or two decimal numbers
+ separated by a hyphen (dash), where the second number is larger
+ than the first. No whitespace is allowed between numbers or
+ hyphens. The list does not have to be sorted.
+ */
+ char dtmf_tones[SDP_MAX_STRING_LEN+1];
+
+ /* Annex P can take one or more values in the range 1-4 . e.g P=1,3 */
+ uint16_t annex_p_val_picture_resize; /* 1 = four; 2 = sixteenth */
+ uint16_t annex_p_val_warp; /* 3 = half; 4=sixteenth */
+
+ uint8_t flag;
+
+ /* END - All Video related FMTP parameters */
+
+} sdp_fmtp_t;
+
+/* a=sctpmap line used for Datachannels */
+typedef struct sdp_sctpmap {
+ uint16_t port;
+ uint32_t streams; /* Num streams per Datachannel */
+ char protocol[SDP_MAX_STRING_LEN+1];
+} sdp_sctpmap_t;
+
+#define SDP_MAX_MSID_LEN 64
+
+typedef struct sdp_msid {
+ char identifier[SDP_MAX_MSID_LEN+1];
+ char appdata[SDP_MAX_MSID_LEN+1];
+} sdp_msid_t;
+
+/* a=qos|secure|X-pc-qos|X-qos info */
+typedef struct sdp_qos {
+ sdp_qos_strength_e strength;
+ sdp_qos_dir_e direction;
+ tinybool confirm;
+ sdp_qos_status_types_e status_type;
+} sdp_qos_t;
+
+/* a=curr:qos status_type direction */
+typedef struct sdp_curr {
+ sdp_curr_type_e type;
+ sdp_qos_status_types_e status_type;
+ sdp_qos_dir_e direction;
+} sdp_curr_t;
+
+/* a=des:qos strength status_type direction */
+typedef struct sdp_des {
+ sdp_des_type_e type;
+ sdp_qos_strength_e strength;
+ sdp_qos_status_types_e status_type;
+ sdp_qos_dir_e direction;
+} sdp_des_t;
+
+/* a=conf:qos status_type direction */
+typedef struct sdp_conf {
+ sdp_conf_type_e type;
+ sdp_qos_status_types_e status_type;
+ sdp_qos_dir_e direction;
+} sdp_conf_t;
+
+
+/* a=rtpmap or a=sprtmap info */
+typedef struct sdp_transport_map {
+ uint16_t payload_num;
+ char encname[SDP_MAX_STRING_LEN+1];
+ uint32_t clockrate;
+ uint16_t num_chan;
+} sdp_transport_map_t;
+
+
+/* a=rtr info */
+typedef struct sdp_rtr {
+ tinybool confirm;
+} sdp_rtr_t;
+
+/* a=subnet info */
+typedef struct sdp_subnet {
+ sdp_nettype_e nettype;
+ sdp_addrtype_e addrtype;
+ char addr[SDP_MAX_STRING_LEN+1];
+ int32_t prefix;
+} sdp_subnet_t;
+
+
+/* a=X-pc-codec info */
+typedef struct sdp_pccodec {
+ uint16_t num_payloads;
+ ushort payload_type[SDP_MAX_PAYLOAD_TYPES];
+} sdp_pccodec_t;
+
+/* a=direction info */
+typedef struct sdp_comediadir {
+ sdp_mediadir_role_e role;
+ tinybool conn_info_present;
+ sdp_conn_t conn_info;
+ uint32_t src_port;
+} sdp_comediadir_t;
+
+
+
+/* a=silenceSupp info */
+typedef struct sdp_silencesupp {
+ tinybool enabled;
+ tinybool timer_null;
+ uint16_t timer;
+ sdp_silencesupp_pref_e pref;
+ sdp_silencesupp_siduse_e siduse;
+ tinybool fxnslevel_null;
+ uint8_t fxnslevel;
+} sdp_silencesupp_t;
+
+
+/*
+ * a=mptime info
+ * Note that an interval value of zero corresponds to
+ * the "-" syntax on the a= line.
+ */
+typedef struct sdp_mptime {
+ uint16_t num_intervals;
+ ushort intervals[SDP_MAX_PAYLOAD_TYPES];
+} sdp_mptime_t;
+
+/*
+ * a=X-sidin:<val>, a=X-sidout:<val> and a=X-confid:<val>
+ * Stream Id,ConfID related attributes to be used for audio/video conferencing
+ *
+*/
+
+typedef struct sdp_stream_data {
+ char x_sidin[SDP_MAX_STRING_LEN+1];
+ char x_sidout[SDP_MAX_STRING_LEN+1];
+ char x_confid[SDP_MAX_STRING_LEN+1];
+ sdp_group_attr_e group_attr; /* FID or LS */
+ uint16_t num_group_id;
+ char * group_ids[SDP_MAX_MEDIA_STREAMS];
+} sdp_stream_data_t;
+
+typedef struct sdp_msid_semantic {
+ char semantic[SDP_MAX_STRING_LEN+1];
+ char * msids[SDP_MAX_MEDIA_STREAMS];
+} sdp_msid_semantic_t;
+
+/*
+ * a=source-filter:<filter-mode> <filter-spec>
+ * <filter-spec> = <nettype> <addrtype> <dest-addr> <src_addr><src_addr>...
+ * One or more source addresses to apply filter, for one or more connection
+ * address in unicast/multicast environments
+ */
+typedef struct sdp_source_filter {
+ sdp_src_filter_mode_e mode;
+ sdp_nettype_e nettype;
+ sdp_addrtype_e addrtype;
+ char dest_addr[SDP_MAX_STRING_LEN+1];
+ uint16_t num_src_addr;
+ char src_list[SDP_MAX_SRC_ADDR_LIST+1][SDP_MAX_STRING_LEN+1];
+} sdp_source_filter_t;
+
+/*
+ * a=rtcp-fb:<payload-type> <feedback-type> [<feedback-parameters>]
+ * Defines RTCP feedback parameters
+ */
+#define SDP_ALL_PAYLOADS 0xFFFF
+typedef struct sdp_fmtp_fb {
+ uint16_t payload_num; /* can be SDP_ALL_PAYLOADS */
+ sdp_rtcp_fb_type_e feedback_type;
+ union {
+ sdp_rtcp_fb_ack_type_e ack;
+ sdp_rtcp_fb_ccm_type_e ccm;
+ sdp_rtcp_fb_nack_type_e nack;
+ uint32_t trr_int;
+ } param;
+ char extra[SDP_MAX_STRING_LEN + 1]; /* Holds any trailing information that
+ cannot be represented by preceding
+ fields. */
+} sdp_fmtp_fb_t;
+
+typedef struct sdp_rtcp {
+ sdp_nettype_e nettype;
+ sdp_addrtype_e addrtype;
+ char addr[SDP_MAX_STRING_LEN+1];
+ uint16_t port;
+} sdp_rtcp_t;
+
+/*
+ * b=<bw-modifier>:<val>
+ *
+*/
+typedef struct sdp_bw_data {
+ struct sdp_bw_data *next_p;
+ sdp_bw_modifier_e bw_modifier;
+ int bw_val;
+} sdp_bw_data_t;
+
+/*
+ * This structure houses a linked list of sdp_bw_data_t instances. Each
+ * sdp_bw_data_t instance represents one b= line.
+ */
+typedef struct sdp_bw {
+ uint16_t bw_data_count;
+ sdp_bw_data_t *bw_data_list;
+} sdp_bw_t;
+
+/* Media lines for AAL2 may have more than one transport type defined
+ * each with its own payload type list. These are referred to as
+ * profile types instead of transport types. This structure is used
+ * to handle these multiple profile types. Note: One additional profile
+ * field is needed because of the way parsing is done. This is not an
+ * error. */
+typedef struct sdp_media_profiles {
+ uint16_t num_profiles;
+ sdp_transport_e profile[SDP_MAX_PROFILES+1];
+ uint16_t num_payloads[SDP_MAX_PROFILES];
+ sdp_payload_ind_e payload_indicator[SDP_MAX_PROFILES][SDP_MAX_PAYLOAD_TYPES];
+ uint16_t payload_type[SDP_MAX_PROFILES][SDP_MAX_PAYLOAD_TYPES];
+} sdp_media_profiles_t;
+/*
+ * a=extmap:<value>["/"<direction>] <URI> <extensionattributes>
+ *
+ */
+typedef struct sdp_extmap {
+ uint16_t id;
+ sdp_direction_e media_direction;
+ tinybool media_direction_specified;
+ char uri[SDP_MAX_STRING_LEN+1];
+ char extension_attributes[SDP_MAX_STRING_LEN+1];
+} sdp_extmap_t;
+
+typedef struct sdp_ssrc {
+ uint32_t ssrc;
+ char attribute[SDP_MAX_STRING_LEN + 1];
+} sdp_ssrc_t;
+
+/*
+ * sdp_srtp_crypto_context_t
+ * This type is used to hold cryptographic context information.
+ *
+ */
+typedef struct sdp_srtp_crypto_context_t_ {
+ int32_t tag;
+ unsigned long selection_flags;
+ sdp_srtp_crypto_suite_t suite;
+ unsigned char master_key[SDP_SRTP_MAX_KEY_SIZE_BYTES];
+ unsigned char master_salt[SDP_SRTP_MAX_SALT_SIZE_BYTES];
+ unsigned char master_key_size_bytes;
+ unsigned char master_salt_size_bytes;
+ unsigned long ssrc; /* not used */
+ unsigned long roc; /* not used */
+ unsigned long kdr; /* not used */
+ unsigned short seq; /* not used */
+ sdp_srtp_fec_order_t fec_order; /* not used */
+ unsigned char master_key_lifetime[SDP_SRTP_MAX_LIFETIME_BYTES];
+ unsigned char mki[SDP_SRTP_MAX_MKI_SIZE_BYTES];
+ uint16_t mki_size_bytes;
+ char* session_parameters;
+} sdp_srtp_crypto_context_t;
+
+
+/* m= line info and associated attribute list */
+/* Note: Most of the port parameter values are 16-bit values. We set
+ * the type to int32_t so we can return either a 16-bit value or the
+ * choose value. */
+typedef struct sdp_mca {
+ sdp_media_e media;
+ sdp_conn_t conn;
+ sdp_transport_e transport;
+ sdp_port_format_e port_format;
+ int32_t port;
+ int32_t sctpport;
+ int32_t num_ports;
+ int32_t vpi;
+ uint32_t vci; /* VCI needs to be 32-bit */
+ int32_t vcci;
+ int32_t cid;
+ uint16_t num_payloads;
+ sdp_payload_ind_e payload_indicator[SDP_MAX_PAYLOAD_TYPES];
+ uint16_t payload_type[SDP_MAX_PAYLOAD_TYPES];
+ sdp_media_profiles_t *media_profiles_p;
+ tinybool sessinfo_found;
+ sdp_encryptspec_t encrypt;
+ sdp_bw_t bw;
+ sdp_attr_e media_direction; /* Either INACTIVE, SENDONLY,
+ RECVONLY, or SENDRECV */
+ uint32_t mid;
+ uint32_t line_number;
+ struct sdp_attr *media_attrs_p;
+ struct sdp_mca *next_p;
+} sdp_mca_t;
+
+
+/* generic a= line info */
+typedef struct sdp_attr {
+ sdp_attr_e type;
+ uint32_t line_number;
+ union {
+ tinybool boolean_val;
+ uint32_t u32_val;
+ char string_val[SDP_MAX_STRING_LEN+1];
+ char *stringp;
+ char ice_attr[SDP_MAX_STRING_LEN+1];
+ sdp_fmtp_t fmtp;
+ sdp_sctpmap_t sctpmap;
+ sdp_msid_t msid;
+ sdp_qos_t qos;
+ sdp_curr_t curr;
+ sdp_des_t des;
+ sdp_conf_t conf;
+ sdp_transport_map_t transport_map; /* A rtpmap or sprtmap */
+ sdp_subnet_t subnet;
+ sdp_t38_ratemgmt_e t38ratemgmt;
+ sdp_t38_udpec_e t38udpec;
+ sdp_pccodec_t pccodec;
+ sdp_silencesupp_t silencesupp;
+ sdp_mca_t *cap_p; /* A X-CAP or CDSC attribute */
+ sdp_rtr_t rtr;
+ sdp_comediadir_t comediadir;
+ sdp_srtp_crypto_context_t srtp_context;
+ sdp_mptime_t mptime;
+ sdp_stream_data_t stream_data;
+ sdp_msid_semantic_t msid_semantic;
+ char unknown[SDP_MAX_STRING_LEN+1];
+ sdp_source_filter_t source_filter;
+ sdp_fmtp_fb_t rtcp_fb;
+ sdp_rtcp_t rtcp;
+ sdp_setup_type_e setup;
+ sdp_connection_type_e connection;
+ sdp_extmap_t extmap;
+ sdp_ssrc_t ssrc;
+ } attr;
+ struct sdp_attr *next_p;
+} sdp_attr_t;
+typedef struct sdp_srtp_crypto_suite_list_ {
+ sdp_srtp_crypto_suite_t crypto_suite_val;
+ char * crypto_suite_str;
+ unsigned char key_size_bytes;
+ unsigned char salt_size_bytes;
+} sdp_srtp_crypto_suite_list;
+
+typedef void (*sdp_parse_error_handler)(void *context,
+ uint32_t line,
+ const char *message);
+
+/* Application configuration options */
+typedef struct sdp_conf_options {
+ tinybool debug_flag[SDP_MAX_DEBUG_TYPES];
+ tinybool version_reqd;
+ tinybool owner_reqd;
+ tinybool session_name_reqd;
+ tinybool timespec_reqd;
+ tinybool media_supported[SDP_MAX_MEDIA_TYPES];
+ tinybool nettype_supported[SDP_MAX_NETWORK_TYPES];
+ tinybool addrtype_supported[SDP_MAX_ADDR_TYPES];
+ tinybool transport_supported[SDP_MAX_TRANSPORT_TYPES];
+ tinybool allow_choose[SDP_MAX_CHOOSE_PARAMS];
+ /* Statistics counts */
+ uint32_t num_builds;
+ uint32_t num_parses;
+ uint32_t num_not_sdp_desc;
+ uint32_t num_invalid_token_order;
+ uint32_t num_invalid_param;
+ uint32_t num_no_resource;
+ struct sdp_conf_options *next_p;
+ sdp_parse_error_handler error_handler;
+ void *error_handler_context;
+} sdp_conf_options_t;
+
+
+/* Session level SDP info with pointers to media line info. */
+/* Elements here that can only be one of are included directly. Elements */
+/* that can be more than one are pointers. */
+typedef struct {
+ sdp_conf_options_t *conf_p;
+ tinybool debug_flag[SDP_MAX_DEBUG_TYPES];
+ char debug_str[SDP_MAX_STRING_LEN+1];
+ uint32_t debug_id;
+ int32_t version; /* version is really a uint16_t */
+ char owner_name[SDP_MAX_STRING_LEN+1];
+ char owner_sessid[SDP_MAX_STRING_LEN+1];
+ char owner_version[SDP_MAX_STRING_LEN+1];
+ sdp_nettype_e owner_network_type;
+ sdp_addrtype_e owner_addr_type;
+ char owner_addr[SDP_MAX_STRING_LEN+1];
+ char sessname[SDP_MAX_STRING_LEN+1];
+ tinybool sessinfo_found;
+ tinybool uri_found;
+ sdp_conn_t default_conn;
+ sdp_timespec_t *timespec_p;
+ sdp_encryptspec_t encrypt;
+ sdp_bw_t bw;
+ sdp_attr_t *sess_attrs_p;
+
+ /* Info to help with building capability attributes. */
+ uint16_t cur_cap_num;
+ sdp_mca_t *cur_cap_p;
+ /* Info to help parsing X-cpar attrs. */
+ uint16_t cap_valid;
+ uint16_t last_cap_inst;
+ /* Info to help building X-cpar/cpar attrs. */
+ sdp_attr_e last_cap_type;
+
+ /* Facilitates reporting line number for SDP errors */
+ uint32_t parse_line;
+
+ /* MCA - Media, connection, and attributes */
+ sdp_mca_t *mca_p;
+ ushort mca_count;
+} sdp_t;
+
+
+/* Token processing table. */
+typedef struct {
+ char *name;
+ sdp_result_e (*parse_func)(sdp_t *sdp_p, uint16_t level, const char *ptr);
+ sdp_result_e (*build_func)(sdp_t *sdp_p, uint16_t level, flex_string *fs);
+} sdp_tokenarray_t;
+
+/* Attribute processing table. */
+typedef struct {
+ char *name;
+ uint16_t strlen;
+ sdp_result_e (*parse_func)(sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr);
+ sdp_result_e (*build_func)(sdp_t *sdp_p, sdp_attr_t *attr_p,
+ flex_string *fs);
+} sdp_attrarray_t;
+
+
+/* Prototypes */
+
+/* sdp_config.c */
+extern sdp_conf_options_t *sdp_init_config(void);
+extern void sdp_free_config(sdp_conf_options_t *config_p);
+extern void sdp_appl_debug(sdp_conf_options_t *config_p, sdp_debug_e debug_type,
+ tinybool debug_flag);
+extern void sdp_require_version(sdp_conf_options_t *config_p, tinybool version_required);
+extern void sdp_require_owner(sdp_conf_options_t *config_p, tinybool owner_required);
+extern void sdp_require_session_name(sdp_conf_options_t *config_p,
+ tinybool sess_name_required);
+extern void sdp_require_timespec(sdp_conf_options_t *config_p, tinybool timespec_required);
+extern void sdp_media_supported(sdp_conf_options_t *config_p, sdp_media_e media_type,
+ tinybool media_supported);
+extern void sdp_nettype_supported(sdp_conf_options_t *config_p, sdp_nettype_e nettype,
+ tinybool nettype_supported);
+extern void sdp_addrtype_supported(sdp_conf_options_t *config_p, sdp_addrtype_e addrtype,
+ tinybool addrtype_supported);
+extern void sdp_transport_supported(sdp_conf_options_t *config_p, sdp_transport_e transport,
+ tinybool transport_supported);
+extern void sdp_allow_choose(sdp_conf_options_t *config_p, sdp_choose_param_e param,
+ tinybool choose_allowed);
+extern void sdp_config_set_error_handler(sdp_conf_options_t *config_p,
+ sdp_parse_error_handler handler,
+ void *context);
+
+/* sdp_main.c */
+extern sdp_t *sdp_init_description(sdp_conf_options_t *config_p);
+extern void sdp_debug(sdp_t *sdp_ptr, sdp_debug_e debug_type, tinybool debug_flag);
+extern void sdp_set_string_debug(sdp_t *sdp_ptr, const char *debug_str);
+extern sdp_result_e sdp_parse(sdp_t *sdp_ptr, const char *buf, size_t len);
+extern sdp_result_e sdp_build(sdp_t *sdp_ptr, flex_string *fs);
+extern sdp_result_e sdp_free_description(sdp_t *sdp_ptr);
+extern void sdp_parse_error(sdp_t *sdp, const char *format, ...);
+
+extern const char *sdp_get_result_name(sdp_result_e rc);
+
+
+/* sdp_access.c */
+extern tinybool sdp_version_valid(sdp_t *sdp_p);
+extern int32_t sdp_get_version(sdp_t *sdp_p);
+extern sdp_result_e sdp_set_version(sdp_t *sdp_p, int32_t version);
+
+extern tinybool sdp_owner_valid(sdp_t *sdp_p);
+extern const char *sdp_get_owner_username(sdp_t *sdp_p);
+extern const char *sdp_get_owner_sessionid(sdp_t *sdp_p);
+extern const char *sdp_get_owner_version(sdp_t *sdp_p);
+extern sdp_nettype_e sdp_get_owner_network_type(sdp_t *sdp_p);
+extern sdp_addrtype_e sdp_get_owner_address_type(sdp_t *sdp_p);
+extern const char *sdp_get_owner_address(sdp_t *sdp_p);
+extern sdp_result_e sdp_set_owner_username(sdp_t *sdp_p, const char *username);
+extern sdp_result_e sdp_set_owner_sessionid(sdp_t *sdp_p, const char *sessid);
+extern sdp_result_e sdp_set_owner_version(sdp_t *sdp_p, const char *version);
+extern sdp_result_e sdp_set_owner_network_type(sdp_t *sdp_p,
+ sdp_nettype_e network_type);
+extern sdp_result_e sdp_set_owner_address_type(sdp_t *sdp_p,
+ sdp_addrtype_e address_type);
+extern sdp_result_e sdp_set_owner_address(sdp_t *sdp_p, const char *address);
+
+extern tinybool sdp_session_name_valid(sdp_t *sdp_p);
+extern const char *sdp_get_session_name(sdp_t *sdp_p);
+extern sdp_result_e sdp_set_session_name(sdp_t *sdp_p, const char *sessname);
+
+extern tinybool sdp_timespec_valid(sdp_t *sdp_ptr);
+extern const char *sdp_get_time_start(sdp_t *sdp_ptr);
+extern const char *sdp_get_time_stop(sdp_t *sdp_ptr);
+sdp_result_e sdp_set_time_start(sdp_t *sdp_p, const char *start_time);
+sdp_result_e sdp_set_time_stop(sdp_t *sdp_p, const char *stop_time);
+
+extern tinybool sdp_encryption_valid(sdp_t *sdp_p, uint16_t level);
+extern sdp_encrypt_type_e sdp_get_encryption_method(sdp_t *sdp_p, uint16_t level);
+extern const char *sdp_get_encryption_key(sdp_t *sdp_p, uint16_t level);
+
+extern tinybool sdp_connection_valid(sdp_t *sdp_p, uint16_t level);
+extern tinybool sdp_bw_line_exists(sdp_t *sdp_p, uint16_t level, uint16_t inst_num);
+extern tinybool sdp_bandwidth_valid(sdp_t *sdp_p, uint16_t level, uint16_t inst_num);
+extern sdp_nettype_e sdp_get_conn_nettype(sdp_t *sdp_p, uint16_t level);
+extern sdp_addrtype_e sdp_get_conn_addrtype(sdp_t *sdp_p, uint16_t level);
+extern const char *sdp_get_conn_address(sdp_t *sdp_p, uint16_t level);
+
+extern tinybool sdp_is_mcast_addr (sdp_t *sdp_p, uint16_t level);
+extern int32_t sdp_get_mcast_ttl(sdp_t *sdp_p, uint16_t level);
+extern int32_t sdp_get_mcast_num_of_addresses(sdp_t *sdp_p, uint16_t level);
+
+extern sdp_result_e sdp_set_conn_nettype(sdp_t *sdp_p, uint16_t level,
+ sdp_nettype_e nettype);
+extern sdp_result_e sdp_set_conn_addrtype(sdp_t *sdp_p, uint16_t level,
+ sdp_addrtype_e addrtype);
+extern sdp_result_e sdp_set_conn_address(sdp_t *sdp_p, uint16_t level,
+ const char *address);
+
+extern tinybool sdp_media_line_valid(sdp_t *sdp_p, uint16_t level);
+extern uint16_t sdp_get_num_media_lines(sdp_t *sdp_ptr);
+extern sdp_media_e sdp_get_media_type(sdp_t *sdp_p, uint16_t level);
+extern uint32_t sdp_get_media_line_number(sdp_t *sdp_p, uint16_t level);
+extern sdp_port_format_e sdp_get_media_port_format(sdp_t *sdp_p, uint16_t level);
+extern int32_t sdp_get_media_portnum(sdp_t *sdp_p, uint16_t level);
+extern int32_t sdp_get_media_portcount(sdp_t *sdp_p, uint16_t level);
+extern int32_t sdp_get_media_vpi(sdp_t *sdp_p, uint16_t level);
+extern uint32_t sdp_get_media_vci(sdp_t *sdp_p, uint16_t level);
+extern int32_t sdp_get_media_vcci(sdp_t *sdp_p, uint16_t level);
+extern int32_t sdp_get_media_cid(sdp_t *sdp_p, uint16_t level);
+extern sdp_transport_e sdp_get_media_transport(sdp_t *sdp_p, uint16_t level);
+extern uint16_t sdp_get_media_num_profiles(sdp_t *sdp_p, uint16_t level);
+extern sdp_transport_e sdp_get_media_profile(sdp_t *sdp_p, uint16_t level,
+ uint16_t profile_num);
+extern uint16_t sdp_get_media_num_payload_types(sdp_t *sdp_p, uint16_t level);
+extern uint16_t sdp_get_media_profile_num_payload_types(sdp_t *sdp_p, uint16_t level,
+ uint16_t profile_num);
+extern rtp_ptype sdp_get_known_payload_type(sdp_t *sdp_p,
+ uint16_t level,
+ uint16_t payload_type_raw);
+extern uint32_t sdp_get_media_payload_type(sdp_t *sdp_p, uint16_t level,
+ uint16_t payload_num, sdp_payload_ind_e *indicator);
+extern uint32_t sdp_get_media_profile_payload_type(sdp_t *sdp_p, uint16_t level,
+ uint16_t prof_num, uint16_t payload_num, sdp_payload_ind_e *indicator);
+extern sdp_result_e sdp_insert_media_line(sdp_t *sdp_p, uint16_t level);
+extern sdp_result_e sdp_set_media_type(sdp_t *sdp_p, uint16_t level,
+ sdp_media_e media);
+extern sdp_result_e sdp_set_media_portnum(sdp_t *sdp_p, uint16_t level,
+ int32_t portnum, int32_t sctpport);
+extern int32_t sdp_get_media_sctp_port(sdp_t *sdp_p, uint16_t level);
+extern sdp_result_e sdp_set_media_transport(sdp_t *sdp_p, uint16_t level,
+ sdp_transport_e transport);
+extern sdp_result_e sdp_add_media_profile(sdp_t *sdp_p, uint16_t level,
+ sdp_transport_e profile);
+extern sdp_result_e sdp_add_media_payload_type(sdp_t *sdp_p, uint16_t level,
+ uint16_t payload_type, sdp_payload_ind_e indicator);
+extern sdp_result_e sdp_add_media_profile_payload_type(sdp_t *sdp_p,
+ uint16_t level, uint16_t prof_num, uint16_t payload_type,
+ sdp_payload_ind_e indicator);
+
+/* sdp_attr_access.c */
+extern sdp_attr_t *sdp_find_attr (sdp_t *sdp_p, uint16_t level, uint8_t cap_num,
+ sdp_attr_e attr_type, uint16_t inst_num);
+
+extern int sdp_find_fmtp_inst(sdp_t *sdp_ptr, uint16_t level, uint16_t payload_num);
+extern sdp_result_e sdp_add_new_attr(sdp_t *sdp_p, uint16_t level, uint8_t cap_num,
+ sdp_attr_e attr_type, uint16_t *inst_num);
+extern sdp_result_e sdp_attr_num_instances(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, sdp_attr_e attr_type, uint16_t *num_attr_inst);
+extern tinybool sdp_attr_valid(sdp_t *sdp_p, sdp_attr_e attr_type,
+ uint16_t level, uint8_t cap_num, uint16_t inst_num);
+extern uint32_t sdp_attr_line_number(sdp_t *sdp_p, sdp_attr_e attr_type,
+ uint16_t level, uint8_t cap_num, uint16_t inst_num);
+extern const char *sdp_attr_get_simple_string(sdp_t *sdp_p,
+ sdp_attr_e attr_type, uint16_t level, uint8_t cap_num, uint16_t inst_num);
+extern const char *sdp_attr_get_long_string(sdp_t *sdp_p,
+ sdp_attr_e attr_type, uint16_t level, uint8_t cap_num, uint16_t inst_num);
+extern uint32_t sdp_attr_get_simple_u32(sdp_t *sdp_p, sdp_attr_e attr_type,
+ uint16_t level, uint8_t cap_num, uint16_t inst_num);
+extern tinybool sdp_attr_get_simple_boolean(sdp_t *sdp_p,
+ sdp_attr_e attr_type, uint16_t level, uint8_t cap_num, uint16_t inst_num);
+extern tinybool sdp_attr_is_present (sdp_t *sdp_p, sdp_attr_e attr_type,
+ uint16_t level, uint8_t cap_num);
+extern const char* sdp_attr_get_maxprate(sdp_t *sdp_p, uint16_t level,
+ uint16_t inst_num);
+extern sdp_t38_ratemgmt_e sdp_attr_get_t38ratemgmt(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern sdp_t38_udpec_e sdp_attr_get_t38udpec(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern sdp_direction_e sdp_get_media_direction(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num);
+extern sdp_qos_strength_e sdp_attr_get_qos_strength(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, sdp_attr_e qos_attr, uint16_t inst_num);
+extern sdp_qos_status_types_e sdp_attr_get_qos_status_type (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, sdp_attr_e qos_attr, uint16_t inst_num);
+extern sdp_qos_dir_e sdp_attr_get_qos_direction(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, sdp_attr_e qos_attr, uint16_t inst_num);
+extern tinybool sdp_attr_get_qos_confirm(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, sdp_attr_e qos_attr, uint16_t inst_num);
+extern sdp_curr_type_e sdp_attr_get_curr_type (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, sdp_attr_e qos_attr, uint16_t inst_num);
+extern sdp_des_type_e sdp_attr_get_des_type (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, sdp_attr_e qos_attr, uint16_t inst_num);
+extern sdp_conf_type_e sdp_attr_get_conf_type (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, sdp_attr_e qos_attr, uint16_t inst_num);
+extern sdp_nettype_e sdp_attr_get_subnet_nettype(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern sdp_addrtype_e sdp_attr_get_subnet_addrtype(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern const char *sdp_attr_get_subnet_addr(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern int32_t sdp_attr_get_subnet_prefix(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern rtp_ptype sdp_attr_get_rtpmap_known_codec(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern tinybool sdp_attr_rtpmap_payload_valid(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t *inst_num, uint16_t payload_type);
+extern uint16_t sdp_attr_get_rtpmap_payload_type(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern const char *sdp_attr_get_rtpmap_encname(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern uint32_t sdp_attr_get_rtpmap_clockrate(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern uint16_t sdp_attr_get_rtpmap_num_chan(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern tinybool sdp_attr_sprtmap_payload_valid(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t *inst_num, uint16_t payload_type);
+extern uint16_t sdp_attr_get_sprtmap_payload_type(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern const char *sdp_attr_get_sprtmap_encname(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern uint32_t sdp_attr_get_sprtmap_clockrate(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern uint16_t sdp_attr_get_sprtmap_num_chan(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern tinybool sdp_attr_fmtp_payload_valid(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t *inst_num, uint16_t payload_type);
+extern uint16_t sdp_attr_get_fmtp_payload_type(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern sdp_ne_res_e sdp_attr_fmtp_is_range_set(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num, uint8_t low_val, uint8_t high_val);
+extern tinybool sdp_attr_fmtp_valid(sdp_t *sdp_p, uint16_t level, uint8_t cap_num,
+ uint16_t inst_num, uint16_t appl_maxval, uint32_t* evt_array);
+extern sdp_result_e sdp_attr_set_fmtp_payload_type(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num,
+ uint16_t payload_num);
+extern sdp_result_e sdp_attr_get_fmtp_range(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num, uint32_t *bmap);
+extern sdp_result_e sdp_attr_clear_fmtp_range(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num, uint8_t low_val, uint8_t high_val);
+extern sdp_ne_res_e sdp_attr_compare_fmtp_ranges(sdp_t *src_sdp_ptr,
+ sdp_t *dst_sdp_ptr, uint16_t src_level, uint16_t dst_level,
+ uint8_t src_cap_num, uint8_t dst_cap_num, uint16_t src_inst_num,
+ uint16_t dst_inst_num);
+extern sdp_result_e sdp_attr_copy_fmtp_ranges(sdp_t *src_sdp_ptr,
+ sdp_t *dst_sdp_ptr, uint16_t src_level, uint16_t dst_level,
+ uint8_t src_cap_num, uint8_t dst_cap_num, uint16_t src_inst_num,
+ uint16_t dst_inst_num);
+extern uint32_t sdp_attr_get_fmtp_mode_for_payload_type (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint32_t payload_type);
+
+extern sdp_result_e sdp_attr_set_fmtp_max_fs (sdp_t *sdp_p,
+ uint16_t level,
+ uint8_t cap_num,
+ uint16_t inst_num,
+ uint32_t max_fs);
+
+extern sdp_result_e sdp_attr_set_fmtp_max_fr (sdp_t *sdp_p,
+ uint16_t level,
+ uint8_t cap_num,
+ uint16_t inst_num,
+ uint32_t max_fr);
+
+/* get routines */
+extern int32_t sdp_attr_get_fmtp_bitrate_type (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+
+extern int32_t sdp_attr_get_fmtp_cif (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern int32_t sdp_attr_get_fmtp_qcif (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern int32_t sdp_attr_get_fmtp_sqcif (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern int32_t sdp_attr_get_fmtp_cif4 (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern int32_t sdp_attr_get_fmtp_cif16 (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern int32_t sdp_attr_get_fmtp_maxbr (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern sdp_result_e sdp_attr_get_fmtp_max_average_bitrate (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num, uint32_t* val);
+extern sdp_result_e sdp_attr_get_fmtp_usedtx (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num, tinybool* val);
+extern sdp_result_e sdp_attr_get_fmtp_stereo (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num, tinybool* val);
+extern sdp_result_e sdp_attr_get_fmtp_useinbandfec (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num, tinybool* val);
+extern char* sdp_attr_get_fmtp_maxcodedaudiobandwidth (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern sdp_result_e sdp_attr_get_fmtp_cbr (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num, tinybool* val);
+extern int32_t sdp_attr_get_fmtp_custom_x (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern int32_t sdp_attr_get_fmtp_custom_y (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern int32_t sdp_attr_get_fmtp_custom_mpi (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern int32_t sdp_attr_get_fmtp_par_width (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern int32_t sdp_attr_get_fmtp_par_height (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern int32_t sdp_attr_get_fmtp_bpp (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern int32_t sdp_attr_get_fmtp_hrd (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern int32_t sdp_attr_get_fmtp_profile (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern int32_t sdp_attr_get_fmtp_level (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern tinybool sdp_attr_get_fmtp_interlace (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern tinybool sdp_attr_get_fmtp_annex_d (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern tinybool sdp_attr_get_fmtp_annex_f (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern tinybool sdp_attr_get_fmtp_annex_i (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern tinybool sdp_attr_get_fmtp_annex_j (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern tinybool sdp_attr_get_fmtp_annex_t (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern int32_t sdp_attr_get_fmtp_annex_k_val (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern int32_t sdp_attr_get_fmtp_annex_n_val (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+
+extern int32_t sdp_attr_get_fmtp_annex_p_picture_resize (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern int32_t sdp_attr_get_fmtp_annex_p_warp (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+
+/* sctpmap params */
+extern uint16_t sdp_attr_get_sctpmap_port(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern sdp_result_e sdp_attr_get_sctpmap_protocol (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num, char* protocol);
+extern sdp_result_e sdp_attr_get_sctpmap_streams (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num, uint32_t* val);
+
+extern const char *sdp_attr_get_msid_identifier(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern const char *sdp_attr_get_msid_appdata(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+
+/* H.264 codec specific params */
+
+extern const char *sdp_attr_get_fmtp_profile_id(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern const char *sdp_attr_get_fmtp_param_sets(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern sdp_result_e sdp_attr_get_fmtp_pack_mode (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num, uint16_t *val);
+
+extern sdp_result_e sdp_attr_get_fmtp_level_asymmetry_allowed (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num, uint16_t *val);
+
+extern sdp_result_e sdp_attr_get_fmtp_interleaving_depth (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num,
+ uint16_t *val);
+extern sdp_result_e sdp_attr_get_fmtp_max_don_diff (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num,
+ uint32_t *val);
+
+/* The following four H.264 parameters that require special handling as
+ * the values range from 0 - 4294967295
+ */
+extern sdp_result_e sdp_attr_get_fmtp_deint_buf_req (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num,
+ uint32_t *val);
+extern sdp_result_e sdp_attr_get_fmtp_deint_buf_cap (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num,
+ uint32_t *val);
+extern sdp_result_e sdp_attr_get_fmtp_init_buf_time (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num,
+ uint32_t *val);
+extern sdp_result_e sdp_attr_get_fmtp_max_rcmd_nalu_size (sdp_t *sdp_p,
+ uint16_t level, uint8_t cap_num,
+ uint16_t inst_num, uint32_t *val);
+
+
+extern sdp_result_e sdp_attr_get_fmtp_max_mbps (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num, uint32_t *val);
+extern sdp_result_e sdp_attr_get_fmtp_max_fs (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num, uint32_t *val);
+extern sdp_result_e sdp_attr_get_fmtp_max_fr (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num, uint32_t *val);
+extern sdp_result_e sdp_attr_get_fmtp_max_cpb (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num, uint32_t *val);
+extern sdp_result_e sdp_attr_get_fmtp_max_dpb (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num, uint32_t *val);
+extern sdp_result_e sdp_attr_get_fmtp_max_br (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num, uint32_t *val);
+extern tinybool sdp_attr_fmtp_is_redundant_pic_cap (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num,
+ uint16_t inst_num);
+extern tinybool sdp_attr_fmtp_is_parameter_add (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num,
+ uint16_t inst_num);
+extern tinybool sdp_attr_fmtp_is_annexa_set (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num,
+ uint16_t inst_num);
+
+extern tinybool sdp_attr_fmtp_is_annexb_set (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num,
+ uint16_t inst_num);
+
+extern sdp_fmtp_format_type_e sdp_attr_fmtp_get_fmtp_format (sdp_t *sdp_p, uint16_t level, uint8_t cap_num,
+ uint16_t inst_num);
+
+extern uint16_t sdp_attr_get_pccodec_num_payload_types(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern uint16_t sdp_attr_get_pccodec_payload_type(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num, uint16_t payload_num);
+extern sdp_result_e sdp_attr_add_pccodec_payload_type(sdp_t *sdp_p,
+ uint16_t level, uint8_t cap_num,
+ uint16_t inst_num, uint16_t payload_type);
+extern uint16_t sdp_attr_get_xcap_first_cap_num(sdp_t *sdp_p, uint16_t level,
+ uint16_t inst_num);
+extern sdp_media_e sdp_attr_get_xcap_media_type(sdp_t *sdp_p, uint16_t level,
+ uint16_t inst_num);
+extern sdp_transport_e sdp_attr_get_xcap_transport_type(sdp_t *sdp_p,
+ uint16_t level, uint16_t inst_num);
+extern uint16_t sdp_attr_get_xcap_num_payload_types(sdp_t *sdp_p, uint16_t level,
+ uint16_t inst_num);
+extern uint16_t sdp_attr_get_xcap_payload_type(sdp_t *sdp_p, uint16_t level,
+ uint16_t inst_num, uint16_t payload_num,
+ sdp_payload_ind_e *indicator);
+extern sdp_result_e sdp_attr_add_xcap_payload_type(sdp_t *sdp_p, uint16_t level,
+ uint16_t inst_num, uint16_t payload_type,
+ sdp_payload_ind_e indicator);
+extern uint16_t sdp_attr_get_cdsc_first_cap_num(sdp_t *sdp_p, uint16_t level,
+ uint16_t inst_num);
+extern sdp_media_e sdp_attr_get_cdsc_media_type(sdp_t *sdp_p, uint16_t level,
+ uint16_t inst_num);
+extern sdp_transport_e sdp_attr_get_cdsc_transport_type(sdp_t *sdp_p,
+ uint16_t level, uint16_t inst_num);
+extern uint16_t sdp_attr_get_cdsc_num_payload_types(sdp_t *sdp_p, uint16_t level,
+ uint16_t inst_num);
+extern uint16_t sdp_attr_get_cdsc_payload_type(sdp_t *sdp_p, uint16_t level,
+ uint16_t inst_num, uint16_t payload_num,
+ sdp_payload_ind_e *indicator);
+extern sdp_result_e sdp_attr_add_cdsc_payload_type(sdp_t *sdp_p, uint16_t level,
+ uint16_t inst_num, uint16_t payload_type,
+ sdp_payload_ind_e indicator);
+extern tinybool sdp_media_dynamic_payload_valid (sdp_t *sdp_p, uint16_t payload_type,
+ uint16_t m_line);
+
+extern tinybool sdp_attr_get_rtr_confirm (sdp_t *, uint16_t, uint8_t, uint16_t);
+
+extern tinybool sdp_attr_get_silencesupp_enabled(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern uint16_t sdp_attr_get_silencesupp_timer(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num,
+ tinybool *null_ind);
+extern sdp_silencesupp_pref_e sdp_attr_get_silencesupp_pref(sdp_t *sdp_p,
+ uint16_t level,
+ uint8_t cap_num,
+ uint16_t inst_num);
+extern sdp_silencesupp_siduse_e sdp_attr_get_silencesupp_siduse(sdp_t *sdp_p,
+ uint16_t level,
+ uint8_t cap_num,
+ uint16_t inst_num);
+extern uint8_t sdp_attr_get_silencesupp_fxnslevel(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num,
+ tinybool *null_ind);
+extern sdp_mediadir_role_e sdp_attr_get_comediadir_role(sdp_t *sdp_p,
+ uint16_t level, uint8_t cap_num,
+ uint16_t inst_num);
+
+extern uint16_t sdp_attr_get_mptime_num_intervals(
+ sdp_t *sdp_p, uint16_t level, uint8_t cap_num, uint16_t inst_num);
+extern uint16_t sdp_attr_get_mptime_interval(
+ sdp_t *sdp_p, uint16_t level, uint8_t cap_num, uint16_t inst_num, uint16_t interval_num);
+extern sdp_result_e sdp_attr_add_mptime_interval(
+ sdp_t *sdp_p, uint16_t level, uint8_t cap_num, uint16_t inst_num, uint16_t interval);
+
+
+extern sdp_result_e sdp_copy_all_bw_lines(sdp_t *src_sdp_ptr, sdp_t *dst_sdp_ptr,
+ uint16_t src_level, uint16_t dst_level);
+extern sdp_bw_modifier_e sdp_get_bw_modifier(sdp_t *sdp_p, uint16_t level,
+ uint16_t inst_num);
+extern const char *sdp_get_bw_modifier_name(sdp_bw_modifier_e bw_modifier);
+extern int32_t sdp_get_bw_value(sdp_t *sdp_p, uint16_t level, uint16_t inst_num);
+extern int32_t sdp_get_num_bw_lines (sdp_t *sdp_p, uint16_t level);
+
+extern sdp_result_e sdp_add_new_bw_line(sdp_t *sdp_p, uint16_t level,
+ sdp_bw_modifier_e bw_modifier, uint16_t *inst_num);
+
+extern sdp_group_attr_e sdp_get_group_attr(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+
+extern const char* sdp_attr_get_x_sidout (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+
+
+extern const char* sdp_attr_get_x_sidin (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+
+extern const char* sdp_attr_get_x_confid (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+
+extern uint16_t sdp_get_group_num_id(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+
+extern const char* sdp_get_group_id(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num, uint16_t id_num);
+
+extern int32_t sdp_get_mid_value(sdp_t *sdp_p, uint16_t level);
+extern sdp_result_e sdp_include_new_filter_src_addr(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num,
+ const char *src_addr);
+extern sdp_src_filter_mode_e sdp_get_source_filter_mode(sdp_t *sdp_p,
+ uint16_t level, uint8_t cap_num,
+ uint16_t inst_num);
+extern sdp_result_e sdp_get_filter_destination_attributes(sdp_t *sdp_p,
+ uint16_t level, uint8_t cap_num,
+ uint16_t inst_num,
+ sdp_nettype_e *nettype,
+ sdp_addrtype_e *addrtype,
+ char *dest_addr);
+extern int32_t sdp_get_filter_source_address_count(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num);
+extern sdp_result_e sdp_get_filter_source_address (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num,
+ uint16_t src_addr_id,
+ char *src_addr);
+
+extern sdp_rtcp_unicast_mode_e sdp_get_rtcp_unicast_mode(sdp_t *sdp_p,
+ uint16_t level, uint8_t cap_num,
+ uint16_t inst_num);
+
+void sdp_crypto_debug(char *buffer, ulong length_bytes);
+char * sdp_debug_msg_filter(char *buffer, ulong length_bytes);
+
+extern int32_t
+sdp_attr_get_sdescriptions_tag(sdp_t *sdp_p,
+ uint16_t level,
+ uint8_t cap_num,
+ uint16_t inst_num);
+
+extern sdp_srtp_crypto_suite_t
+sdp_attr_get_sdescriptions_crypto_suite(sdp_t *sdp_p,
+ uint16_t level,
+ uint8_t cap_num,
+ uint16_t inst_num);
+
+extern const char*
+sdp_attr_get_sdescriptions_key(sdp_t *sdp_p,
+ uint16_t level,
+ uint8_t cap_num,
+ uint16_t inst_num);
+
+extern const char*
+sdp_attr_get_sdescriptions_salt(sdp_t *sdp_p,
+ uint16_t level,
+ uint8_t cap_num,
+ uint16_t inst_num);
+
+extern const char*
+sdp_attr_get_sdescriptions_lifetime(sdp_t *sdp_p,
+ uint16_t level,
+ uint8_t cap_num,
+ uint16_t inst_num);
+
+extern sdp_result_e
+sdp_attr_get_sdescriptions_mki(sdp_t *sdp_p,
+ uint16_t level,
+ uint8_t cap_num,
+ uint16_t inst_num,
+ const char **mki_value,
+ uint16_t *mki_length);
+
+extern const char*
+sdp_attr_get_sdescriptions_session_params(sdp_t *sdp_p,
+ uint16_t level,
+ uint8_t cap_num,
+ uint16_t inst_num);
+
+extern unsigned char
+sdp_attr_get_sdescriptions_key_size(sdp_t *sdp_p,
+ uint16_t level,
+ uint8_t cap_num,
+ uint16_t inst_num);
+
+extern unsigned char
+sdp_attr_get_sdescriptions_salt_size(sdp_t *sdp_p,
+ uint16_t level,
+ uint8_t cap_num,
+ uint16_t inst_num);
+
+extern unsigned long
+sdp_attr_get_srtp_crypto_selection_flags(sdp_t *sdp_p,
+ uint16_t level,
+ uint8_t cap_num,
+ uint16_t inst_num);
+
+sdp_result_e
+sdp_attr_get_ice_attribute (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, sdp_attr_e sdp_attr, uint16_t inst_num,
+ char **out);
+
+sdp_result_e
+sdp_attr_get_rtcp_mux_attribute (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, sdp_attr_e sdp_attr, uint16_t inst_num,
+ tinybool *rtcp_mux);
+
+sdp_result_e
+sdp_attr_get_setup_attribute (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num, sdp_setup_type_e *setup_type);
+
+sdp_result_e
+sdp_attr_get_connection_attribute (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num, sdp_connection_type_e *connection_type);
+
+sdp_result_e
+sdp_attr_get_dtls_fingerprint_attribute (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, sdp_attr_e sdp_attr, uint16_t inst_num,
+ char **out);
+
+sdp_rtcp_fb_ack_type_e
+sdp_attr_get_rtcp_fb_ack(sdp_t *sdp_p, uint16_t level, uint16_t payload_type, uint16_t inst);
+
+sdp_rtcp_fb_nack_type_e
+sdp_attr_get_rtcp_fb_nack(sdp_t *sdp_p, uint16_t level, uint16_t payload_type, uint16_t inst);
+
+uint32_t
+sdp_attr_get_rtcp_fb_trr_int(sdp_t *sdp_p, uint16_t level, uint16_t payload_type,
+ uint16_t inst);
+
+tinybool
+sdp_attr_get_rtcp_fb_remb_enabled(sdp_t *sdp_p, uint16_t level,
+ uint16_t payload_type);
+
+sdp_rtcp_fb_ccm_type_e
+sdp_attr_get_rtcp_fb_ccm(sdp_t *sdp_p, uint16_t level, uint16_t payload_type, uint16_t inst);
+
+sdp_result_e
+sdp_attr_set_rtcp_fb_ack(sdp_t *sdp_p, uint16_t level, uint16_t payload_type, uint16_t inst,
+ sdp_rtcp_fb_ack_type_e type);
+
+sdp_result_e
+sdp_attr_set_rtcp_fb_nack(sdp_t *sdp_p, uint16_t level, uint16_t payload_type, uint16_t inst,
+ sdp_rtcp_fb_nack_type_e);
+
+sdp_result_e
+sdp_attr_set_rtcp_fb_trr_int(sdp_t *sdp_p, uint16_t level, uint16_t payload_type,
+ uint16_t inst, uint32_t interval);
+
+sdp_result_e
+sdp_attr_set_rtcp_fb_remb(sdp_t *sdp_p, uint16_t level, uint16_t payload_type,
+ uint16_t inst);
+
+sdp_result_e
+sdp_attr_set_rtcp_fb_ccm(sdp_t *sdp_p, uint16_t level, uint16_t payload_type, uint16_t inst,
+ sdp_rtcp_fb_ccm_type_e);
+const char *
+sdp_attr_get_extmap_uri(sdp_t *sdp_p, uint16_t level, uint16_t inst);
+
+uint16_t
+sdp_attr_get_extmap_id(sdp_t *sdp_p, uint16_t level, uint16_t inst);
+
+sdp_result_e
+sdp_attr_set_extmap(sdp_t *sdp_p, uint16_t level, uint16_t id, const char* uri, uint16_t inst);
+
+#endif /* _SDP_H_ */
diff --git a/media/webrtc/signaling/src/sdp/sipcc/sdp_access.c b/media/webrtc/signaling/src/sdp/sipcc/sdp_access.c
new file mode 100644
index 000000000..394e69bc8
--- /dev/null
+++ b/media/webrtc/signaling/src/sdp/sipcc/sdp_access.c
@@ -0,0 +1,2083 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "sdp_os_defs.h"
+#include "sdp.h"
+#include "sdp_private.h"
+
+#include "CSFLog.h"
+
+static const char* logTag = "sdp_access";
+
+/* Pulled in from ccsip_sdp.h */
+/* Possible encoding names of static payload types*/
+#define SIPSDP_ATTR_ENCNAME_PCMU "PCMU"
+#define SIPSDP_ATTR_ENCNAME_PCMA "PCMA"
+#define SIPSDP_ATTR_ENCNAME_G729 "G729"
+#define SIPSDP_ATTR_ENCNAME_G723 "G723"
+#define SIPSDP_ATTR_ENCNAME_G726 "G726-32"
+#define SIPSDP_ATTR_ENCNAME_G728 "G728"
+#define SIPSDP_ATTR_ENCNAME_GSM "GSM"
+#define SIPSDP_ATTR_ENCNAME_CN "CN"
+#define SIPSDP_ATTR_ENCNAME_G722 "G722"
+#define SIPSDP_ATTR_ENCNAME_ILBC "iLBC"
+#define SIPSDP_ATTR_ENCNAME_H263v2 "H263-1998"
+#define SIPSDP_ATTR_ENCNAME_H264 "H264"
+#define SIPSDP_ATTR_ENCNAME_VP8 "VP8"
+#define SIPSDP_ATTR_ENCNAME_VP9 "VP9"
+#define SIPSDP_ATTR_ENCNAME_L16_256K "L16"
+#define SIPSDP_ATTR_ENCNAME_ISAC "ISAC"
+#define SIPSDP_ATTR_ENCNAME_OPUS "opus"
+#define SIPSDP_ATTR_ENCNAME_RED "red"
+#define SIPSDP_ATTR_ENCNAME_ULPFEC "ulpfec"
+#define SIPSDP_ATTR_ENCNAME_TELEPHONE_EVENT "telephone-event"
+
/* Function: sdp_find_media_level
 * Description: Find and return a pointer to the specified media level,
 *              if it exists.  Media levels are numbered starting at 1,
 *              in the order the m= lines appear in the description.
 * Note: This is not an API for the application but an internal
 *       routine used by the SDP library.
 * Parameters: sdp_p The SDP handle returned by sdp_init_description.
 *             level The media level to find (1..mca_count).
 * Returns: Pointer to the media level or NULL if not found
 *          (level out of range, or the list is shorter than expected).
 */
sdp_mca_t *sdp_find_media_level (sdp_t *sdp_p, uint16_t level)
{
    int i;
    sdp_mca_t *mca_p = NULL;

    if ((level >= 1) && (level <= sdp_p->mca_count)) {
        /* Walk the singly-linked list of media lines; after the loop,
         * mca_p points at the level'th entry (1-based). */
        for (i=1, mca_p = sdp_p->mca_p;
            ((i < level) && (mca_p != NULL));
            mca_p = mca_p->next_p, i++) {

            /*sa_ignore EMPTYLOOP*/
            ; /* Do nothing. */
        }
    }

    return (mca_p);
}
+
+/* Function: sdp_version_valid
+ * Description: Returns true or false depending on whether the version
+ * set for this SDP is valid. Currently the only valid
+ * version is 0.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * Returns: TRUE or FALSE.
+ */
+tinybool sdp_version_valid (sdp_t *sdp_p)
+{
+ if (sdp_p->version == SDP_INVALID_VALUE) {
+ return (FALSE);
+ } else {
+ return (TRUE);
+ }
+}
+
+/* Function: sdp_get_version
+ * Description: Returns the version value set for the given SDP.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * Returns: Version value.
+ */
+int32_t sdp_get_version (sdp_t *sdp_p)
+{
+ return (sdp_p->version);
+}
+
+/* Function: sdp_set_version
+ * Description: Sets the value of the version parameter for the v= version
+ * token line.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * version Version to set.
+ * Returns: SDP_SUCCESS
+ */
+sdp_result_e sdp_set_version (sdp_t *sdp_p, int32_t version)
+{
+ sdp_p->version = version;
+ return (SDP_SUCCESS);
+}
+
+/* Function: sdp_owner_valid
+ * Description: Returns true or false depending on whether the owner
+ * token line has been defined for this SDP.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * Returns: TRUE or FALSE.
+ */
+tinybool sdp_owner_valid (sdp_t *sdp_p)
+{
+ if ((sdp_p->owner_name[0] == '\0') ||
+ (sdp_p->owner_network_type == SDP_NT_INVALID) ||
+ (sdp_p->owner_addr_type == SDP_AT_INVALID) ||
+ (sdp_p->owner_addr[0] == '\0')) {
+ return (FALSE);
+ } else {
+ return (TRUE);
+ }
+}
+
+/* Function: sdp_get_owner_username
+ * Description: Returns a pointer to the value of the username parameter
+ * from the o= owner token line. Value is returned as a
+ * const ptr and so cannot be modified by the application.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * Returns: Version value.
+ */
+const char *sdp_get_owner_username (sdp_t *sdp_p)
+{
+ return (sdp_p->owner_name);
+}
+
+/* Function: sdp_get_owner_sessionid
+ * Description: Returns the session id parameter from the o= owner token
+ * line. Because the value may be larger than 32 bits, this
+ * parameter is returned as a string, though has been verified
+ * to be numeric. Value is returned as a const ptr and so
+ * cannot be modified by the application.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * Returns: Ptr to owner session id or NULL.
+ */
+const char *sdp_get_owner_sessionid (sdp_t *sdp_p)
+{
+ return (sdp_p->owner_sessid);
+}
+
+/* Function: sdp_get_owner_version
+ * Description: Returns the version parameter from the o= owner token
+ * line. Because the value may be larger than 32 bits, this
+ * parameter is returned as a string, though has been verified
+ * to be numeric. Value is returned as a const ptr and so
+ * cannot be modified by the application.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * Returns: Ptr to owner version or NULL.
+ */
+const char *sdp_get_owner_version (sdp_t *sdp_p)
+{
+ return (sdp_p->owner_version);
+}
+
+/* Function: sdp_get_owner_network_type
+ * Description: Returns the network type parameter from the o= owner token
+ * line. If network type has not been set SDP_NT_INVALID will
+ * be returned.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * Returns: Network type or SDP_NT_INVALID.
+ */
+sdp_nettype_e sdp_get_owner_network_type (sdp_t *sdp_p)
+{
+ return (sdp_p->owner_network_type);
+}
+
+/* Function: sdp_get_owner_address_type
+ * Description: Returns the address type parameter from the o= owner token
+ * line. If address type has not been set SDP_AT_INVALID will
+ * be returned.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * Returns: Address type or SDP_AT_INVALID.
+ */
+sdp_addrtype_e sdp_get_owner_address_type (sdp_t *sdp_p)
+{
+ return (sdp_p->owner_addr_type);
+}
+
+/* Function: sdp_get_owner_address
+ * Description: Returns the address parameter from the o= owner token
+ * line. Value is returned as a const ptr and so
+ * cannot be modified by the application.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * Returns: Ptr to address or NULL.
+ */
+const char *sdp_get_owner_address (sdp_t *sdp_p)
+{
+ return (sdp_p->owner_addr);
+}
+
+/* Function: sdp_set_owner_username
+ * Description: Sets the value of the username parameter for the o= owner
+ * token line. The string is copied into the SDP structure
+ * so application memory will not be referenced by the SDP lib.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * username Ptr to the username string.
+ * Returns: SDP_SUCCESS
+ */
+sdp_result_e sdp_set_owner_username (sdp_t *sdp_p, const char *username)
+{
+ sstrncpy(sdp_p->owner_name, username, sizeof(sdp_p->owner_name));
+ return (SDP_SUCCESS);
+}
+
+/* Function: sdp_set_owner_username
+ * Description: Sets the value of the session id parameter for the o= owner
+ * token line. The string is copied into the SDP structure
+ * so application memory will not be referenced by the SDP lib.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * sessionid Ptr to the sessionid string.
+ * Returns: SDP_SUCCESS
+ */
+sdp_result_e sdp_set_owner_sessionid (sdp_t *sdp_p, const char *sessionid)
+{
+ sstrncpy(sdp_p->owner_sessid, sessionid, sizeof(sdp_p->owner_sessid));
+ return (SDP_SUCCESS);
+}
+
+/* Function: sdp_set_owner_version
+ * Description: Sets the value of the version parameter for the o= owner
+ * token line. The string is copied into the SDP structure
+ * so application memory will not be referenced by the SDP lib.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * version Ptr to the version string.
+ * Returns: SDP_SUCCESS
+ */
+sdp_result_e sdp_set_owner_version (sdp_t *sdp_p, const char *version)
+{
+ sstrncpy(sdp_p->owner_version, version, sizeof(sdp_p->owner_version));
+ return (SDP_SUCCESS);
+}
+
+/* Function: sdp_set_owner_network_type
+ * Description: Sets the value of the network type parameter for the o= owner
+ * token line.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * network_type Network type for the owner line.
+ * Returns: SDP_SUCCESS
+ */
+sdp_result_e sdp_set_owner_network_type (sdp_t *sdp_p,
+ sdp_nettype_e network_type)
+{
+ sdp_p->owner_network_type = network_type;
+ return (SDP_SUCCESS);
+}
+
+/* Function: sdp_set_owner_address_type
+ * Description: Sets the value of the address type parameter for the o= owner
+ * token line.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * address_type Address type for the owner line.
+ * Returns: SDP_SUCCESS
+ */
+sdp_result_e sdp_set_owner_address_type (sdp_t *sdp_p,
+ sdp_addrtype_e address_type)
+{
+ sdp_p->owner_addr_type = address_type;
+ return (SDP_SUCCESS);
+}
+
+/* Function: sdp_set_owner_address
+ * Description: Sets the value of the address parameter for the o= owner
+ * token line. The string is copied into the SDP structure
+ * so application memory will not be referenced by the SDP lib.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * version Ptr to the version string.
+ * Returns: SDP_SUCCESS
+ */
+sdp_result_e sdp_set_owner_address (sdp_t *sdp_p, const char *address)
+{
+ sstrncpy(sdp_p->owner_addr, address, sizeof(sdp_p->owner_addr));
+ return (SDP_SUCCESS);
+}
+
+/* Function: sdp_session_name_valid
+ * Description: Returns true or false depending on whether the session name
+ * s= token line has been defined for this SDP.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * Returns: TRUE or FALSE.
+ */
+tinybool sdp_session_name_valid (sdp_t *sdp_p)
+{
+ if (sdp_p->sessname[0] == '\0') {
+ return (FALSE);
+ } else {
+ return (TRUE);
+ }
+}
+
+/* Function: sdp_get_session_name
+ * Description: Returns the session name parameter from the s= session
+ * name token line. Value is returned as a const ptr and so
+ * cannot be modified by the application.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * Returns: Ptr to session name or NULL.
+ */
+const char *sdp_get_session_name (sdp_t *sdp_p)
+{
+ return (sdp_p->sessname);
+}
+
+/* Function: sdp_set_session_name
+ * Description: Sets the value of the session name parameter for the s=
+ * session name token line. The string is copied into the
+ * SDP structure so application memory will not be
+ * referenced by the SDP lib.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * sessname Ptr to the session name string.
+ * Returns: SDP_SUCCESS
+ */
+sdp_result_e sdp_set_session_name (sdp_t *sdp_p, const char *sessname)
+{
+ sstrncpy(sdp_p->sessname, sessname, sizeof(sdp_p->sessname));
+ return (SDP_SUCCESS);
+}
+
+/* Function: sdp_timespec_valid
+ * Description: Returns true or false depending on whether the timespec t=
+ * token line has been defined for this SDP.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * Returns: TRUE or FALSE.
+ */
+tinybool sdp_timespec_valid (sdp_t *sdp_p)
+{
+ if ((sdp_p->timespec_p == NULL) ||
+ (sdp_p->timespec_p->start_time[0] == '\0') ||
+ (sdp_p->timespec_p->stop_time[0] == '\0')) {
+ return (FALSE);
+ } else {
+ return (TRUE);
+ }
+}
+
+/* Function: sdp_get_time_start
+ * Description: Returns the start time parameter from the t= timespec token
+ * line. Because the value may be larger than 32 bits, this
+ * parameter is returned as a string, though has been verified
+ * to be numeric. Value is returned as a const ptr and so
+ * cannot be modified by the application.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * Returns: Ptr to start time or NULL.
+ */
+const char *sdp_get_time_start (sdp_t *sdp_p)
+{
+ if (sdp_p->timespec_p != NULL) {
+ return (sdp_p->timespec_p->start_time);
+ } else {
+ return (NULL);
+ }
+}
+
+/* Function: sdp_get_time_stop
+ * Description: Returns the stop time parameter from the t= timespec token
+ * line. Because the value may be larger than 32 bits, this
+ * parameter is returned as a string, though has been verified
+ * to be numeric. Value is returned as a const ptr and so
+ * cannot be modified by the application.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * Returns: Ptr to stop time or NULL.
+ */
+const char *sdp_get_time_stop (sdp_t *sdp_p)
+{
+ if (sdp_p->timespec_p != NULL) {
+ return (sdp_p->timespec_p->stop_time);
+ } else {
+ return (NULL);
+ }
+}
+
+/* Function: sdp_set_time_start
+ * Description: Sets the value of the start time parameter for the t=
+ * timespec token line. The string is copied into the
+ * SDP structure so application memory will not be
+ * referenced by the SDP lib.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * start_time Ptr to the start time string.
+ * Returns: SDP_SUCCESS
+ */
+sdp_result_e sdp_set_time_start (sdp_t *sdp_p, const char *start_time)
+{
+ if (sdp_p->timespec_p == NULL) {
+ sdp_p->timespec_p = (sdp_timespec_t *)SDP_MALLOC(sizeof(sdp_timespec_t));
+ if (sdp_p->timespec_p == NULL) {
+ sdp_p->conf_p->num_no_resource++;
+ return (SDP_NO_RESOURCE);
+ }
+ sdp_p->timespec_p->start_time[0] = '\0';
+ sdp_p->timespec_p->stop_time[0] = '\0';
+ }
+ sstrncpy(sdp_p->timespec_p->start_time, start_time,
+ sizeof(sdp_p->timespec_p->start_time));
+ return (SDP_SUCCESS);
+}
+
+/* Function: sdp_set_time_stop
+ * Description: Sets the value of the stop time parameter for the t=
+ * timespec token line. The string is copied into the
+ * SDP structure so application memory will not be
+ * referenced by the SDP lib.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * stop_time Ptr to the stop time string.
+ * Returns: SDP_SUCCESS
+ */
+sdp_result_e sdp_set_time_stop (sdp_t *sdp_p, const char *stop_time)
+{
+ if (sdp_p->timespec_p == NULL) {
+ sdp_p->timespec_p = (sdp_timespec_t *)SDP_MALLOC(sizeof(sdp_timespec_t));
+ if (sdp_p->timespec_p == NULL) {
+ sdp_p->conf_p->num_no_resource++;
+ return (SDP_NO_RESOURCE);
+ }
+ sdp_p->timespec_p->start_time[0] = '\0';
+ sdp_p->timespec_p->stop_time[0] = '\0';
+ }
+ sstrncpy(sdp_p->timespec_p->stop_time, stop_time,
+ sizeof(sdp_p->timespec_p->stop_time));
+ return (SDP_SUCCESS);
+}
+
+/* Function: sdp_encryption_valid
+ * Description: Returns true or false depending on whether the encryption k=
+ * token line has been defined for this SDP at the given level.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the k= line. Will be
+ * either SDP_SESSION_LEVEL or 1-n specifying a
+ * media line level.
+ * Returns: TRUE or FALSE.
+ */
+tinybool sdp_encryption_valid (sdp_t *sdp_p, uint16_t level)
+{
+ sdp_encryptspec_t *encrypt_p;
+ sdp_mca_t *mca_p;
+
+ if (level == SDP_SESSION_LEVEL) {
+ encrypt_p = &(sdp_p->encrypt);
+ } else {
+ mca_p = sdp_find_media_level(sdp_p, level);
+ if (mca_p == NULL) {
+ return (FALSE);
+ }
+ encrypt_p = &(mca_p->encrypt);
+ }
+
+ if ((encrypt_p->encrypt_type == SDP_ENCRYPT_INVALID) ||
+ ((encrypt_p->encrypt_type != SDP_ENCRYPT_PROMPT) &&
+ (encrypt_p->encrypt_key[0] == '\0'))) {
+ return (FALSE);
+ } else {
+ return (TRUE);
+ }
+}
+
+/* Function: sdp_get_encryption_method
+ * Description: Returns the encryption method parameter from the k=
+ * encryption token line. If encryption method has not been
+ * set SDP_ENCRYPT_INVALID will be returned.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the c= line. Will be
+ * either SDP_SESSION_LEVEL or 1-n specifying a
+ * media line level.
+ * Returns: Encryption method or SDP_ENCRYPT_INVALID.
+ */
+sdp_encrypt_type_e sdp_get_encryption_method (sdp_t *sdp_p, uint16_t level)
+{
+ sdp_encryptspec_t *encrypt_p;
+ sdp_mca_t *mca_p;
+
+ if (level == SDP_SESSION_LEVEL) {
+ encrypt_p = &(sdp_p->encrypt);
+ } else {
+ mca_p = sdp_find_media_level(sdp_p, level);
+ if (mca_p == NULL) {
+ return (SDP_ENCRYPT_INVALID);
+ }
+ encrypt_p = &(mca_p->encrypt);
+ }
+
+ return (encrypt_p->encrypt_type);
+}
+
+/* Function: sdp_get_encryption_key
+ * Description: Returns a pointer to the encryption key parameter
+ * from the k= encryption token line. Value is returned as a
+ * const ptr and so cannot be modified by the application.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the c= line. Will be
+ * either SDP_SESSION_LEVEL or 1-n specifying a
+ * media line level.
+ * Returns: Ptr to encryption key or NULL.
+ */
+const char *sdp_get_encryption_key (sdp_t *sdp_p, uint16_t level)
+{
+ sdp_encryptspec_t *encrypt_p;
+ sdp_mca_t *mca_p;
+
+ if (level == SDP_SESSION_LEVEL) {
+ encrypt_p = &(sdp_p->encrypt);
+ } else {
+ mca_p = sdp_find_media_level(sdp_p, level);
+ if (mca_p == NULL) {
+ return (NULL);
+ }
+ encrypt_p = &(mca_p->encrypt);
+ }
+
+ return (encrypt_p->encrypt_key);
+}
+
+/* Function: sdp_connection_valid
+ * Description: Returns true or false depending on whether the connection c=
+ * token line has been defined for this SDP at the given level.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the c= line. Will be
+ * either SDP_SESSION_LEVEL or 1-n specifying a
+ * media line level.
+ * Returns: TRUE or FALSE.
+ */
+tinybool sdp_connection_valid (sdp_t *sdp_p, uint16_t level)
+{
+ sdp_conn_t *conn_p;
+ sdp_mca_t *mca_p;
+
+ if (level == SDP_SESSION_LEVEL) {
+ conn_p = &(sdp_p->default_conn);
+ } else {
+ mca_p = sdp_find_media_level(sdp_p, level);
+ if (mca_p == NULL) {
+ return (FALSE);
+ }
+ conn_p = &(mca_p->conn);
+ }
+
+ /*if network type is ATM . then allow c= line without address type
+ * and address . This is a special case to cover PVC
+ */
+ if (conn_p->nettype == SDP_NT_ATM &&
+ conn_p->addrtype == SDP_AT_INVALID) {
+ return TRUE;
+ }
+
+ if ((conn_p->nettype >= SDP_MAX_NETWORK_TYPES) ||
+ (conn_p->addrtype >= SDP_MAX_ADDR_TYPES) ||
+ (conn_p->conn_addr[0] == '\0')) {
+ return (FALSE);
+ } else {
+ return (TRUE);
+ }
+}
+
+/* Function: sdp_bandwidth_valid
+ * Description: Returns true or false depending on whether the bandwidth b=
+ * token line has been defined for this SDP at the given level.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the c= line. Will be
+ * either SDP_SESSION_LEVEL or 1-n specifying a
+ * media line level.
+ * inst_num instance number of bw line at that level. The first
+ * instance has a inst_num of 1 and so on.
+ * Returns: TRUE or FALSE.
+ */
+tinybool sdp_bandwidth_valid (sdp_t *sdp_p, uint16_t level, uint16_t inst_num)
+{
+ sdp_bw_data_t *bw_data_p;
+
+ bw_data_p = sdp_find_bw_line(sdp_p, level, inst_num);
+ if (bw_data_p != NULL) {
+ if ((bw_data_p->bw_modifier < SDP_BW_MODIFIER_AS) ||
+ (bw_data_p->bw_modifier >= SDP_MAX_BW_MODIFIER_VAL)) {
+ return FALSE;
+ } else {
+ return TRUE;
+ }
+ } else {
+ return FALSE;
+ }
+}
+
+/*
+ * sdp_bw_line_exists
+ *
+ * Description: This api retruns true if there exists a bw line at the
+ * instance and level specified.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the c= line. Will be
+ * either SDP_SESSION_LEVEL or 1-n specifying a
+ * media line level.
+ * inst_num instance number of bw line at that level. The first
+ * instance has a inst_num of 1 and so on.
+ * Returns: TRUE or FALSE
+ */
+tinybool sdp_bw_line_exists (sdp_t *sdp_p, uint16_t level, uint16_t inst_num)
+{
+ sdp_bw_data_t *bw_data_p;
+
+ bw_data_p = sdp_find_bw_line(sdp_p, level, inst_num);
+ if (bw_data_p != NULL) {
+ return TRUE;
+ } else {
+ return FALSE;
+ }
+}
+
+/* Function: sdp_get_conn_nettype
+ * Description: Returns the network type parameter from the c=
+ * connection token line. If network type has not been
+ * set SDP_NT_INVALID will be returned.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the c= line. Will be
+ * either SDP_SESSION_LEVEL or 1-n specifying a
+ * media line level.
+ * Returns: Network type or SDP_NT_INVALID.
+ */
+sdp_nettype_e sdp_get_conn_nettype (sdp_t *sdp_p, uint16_t level)
+{
+ sdp_conn_t *conn_p;
+ sdp_mca_t *mca_p;
+
+ if (level == SDP_SESSION_LEVEL) {
+ conn_p = &(sdp_p->default_conn);
+ } else {
+ mca_p = sdp_find_media_level(sdp_p, level);
+ if (mca_p == NULL) {
+ return (SDP_NT_INVALID);
+ }
+ conn_p = &(mca_p->conn);
+ }
+
+ return (conn_p->nettype);
+}
+
+/* Function: sdp_get_conn_addrtype
+ * Description: Returns the address type parameter from the c=
+ * connection token line. If address type has not been
+ * set SDP_AT_INVALID will be returned.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the c= line. Will be
+ * either SDP_SESSION_LEVEL or 1-n specifying a
+ * media line level.
+ * Returns: Address type or SDP_AT_INVALID.
+ */
+sdp_addrtype_e sdp_get_conn_addrtype (sdp_t *sdp_p, uint16_t level)
+{
+ sdp_conn_t *conn_p;
+ sdp_mca_t *mca_p;
+
+ if (level == SDP_SESSION_LEVEL) {
+ conn_p = &(sdp_p->default_conn);
+ } else {
+ mca_p = sdp_find_media_level(sdp_p, level);
+ if (mca_p == NULL) {
+ return (SDP_AT_INVALID);
+ }
+ conn_p = &(mca_p->conn);
+ }
+
+ return (conn_p->addrtype);
+}
+
+/* Function: sdp_get_conn_address
+ * Description: Returns a pointer to the address parameter
+ * from the c= connection token line. Value is returned as a
+ * const ptr and so cannot be modified by the application.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the c= line. Will be
+ * either SDP_SESSION_LEVEL or 1-n specifying a
+ * media line level.
+ * Returns: Ptr to address or NULL.
+ */
+const char *sdp_get_conn_address (sdp_t *sdp_p, uint16_t level)
+{
+ sdp_conn_t *conn_p;
+ sdp_mca_t *mca_p;
+
+ if (level == SDP_SESSION_LEVEL) {
+ conn_p = &(sdp_p->default_conn);
+ } else {
+ mca_p = sdp_find_media_level(sdp_p, level);
+ if (mca_p == NULL) {
+ return (NULL);
+ }
+ conn_p = &(mca_p->conn);
+ }
+
+ return (conn_p->conn_addr);
+}
+
+/* Function: sdp_is_mcast_addr
+ * Description: Returns a boolean to indicate if the addr is multicast in
+ * the c=line.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the c= line. Will be
+ * either SDP_SESSION_LEVEL or 1-n specifying a
+ * media line level.
+ * Returns: TRUE if the addr is multicast, FALSE if not.
+ */
+
+tinybool sdp_is_mcast_addr (sdp_t *sdp_p, uint16_t level)
+{
+ sdp_conn_t *conn_p;
+ sdp_mca_t *mca_p;
+
+ if (level == SDP_SESSION_LEVEL) {
+ conn_p = &(sdp_p->default_conn);
+ } else {
+ mca_p = sdp_find_media_level(sdp_p, level);
+ if (mca_p != NULL) {
+ conn_p = &(mca_p->conn);
+ } else {
+ return (FALSE);
+ }
+ }
+
+ if ((conn_p) && (conn_p->is_multicast)) {
+ return (TRUE);
+ } else {
+ return (FALSE);
+ }
+}
+
+/* Function: sdp_get_mcast_ttl
+ * Description: Get the time to live(ttl) value for the multicast address
+ * if present.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the c= line. Will be
+ * either SDP_SESSION_LEVEL or 1-n specifying a
+ * media line level.
+ * Returns: Multicast address - Time to live (ttl) value
+ */
+
+int32_t sdp_get_mcast_ttl (sdp_t *sdp_p, uint16_t level)
+{
+ sdp_conn_t *conn_p;
+ sdp_mca_t *mca_p;
+ uint16_t ttl=0;
+
+ if (level == SDP_SESSION_LEVEL) {
+ conn_p = &(sdp_p->default_conn);
+ } else {
+ mca_p = sdp_find_media_level(sdp_p, level);
+ if (mca_p != NULL) {
+ conn_p = &(mca_p->conn);
+ } else {
+ return SDP_INVALID_VALUE;
+ }
+ }
+
+ if (conn_p) {
+ ttl = conn_p->ttl;
+ }
+ return ttl;
+}
+
+/* Function: sdp_get_mcast_num_of_addresses
+ * Description: Get the number of addresses value for the multicast address
+ * if present.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the c= line. Will be
+ * either SDP_SESSION_LEVEL or 1-n specifying a
+ * media line level.
+ * Returns: Multicast address - number of addresses value
+ */
+
+int32_t sdp_get_mcast_num_of_addresses (sdp_t *sdp_p, uint16_t level)
+{
+ sdp_conn_t *conn_p;
+ sdp_mca_t *mca_p;
+ uint16_t num_addr = 0;
+
+ if (level == SDP_SESSION_LEVEL) {
+ conn_p = &(sdp_p->default_conn);
+ } else {
+ mca_p = sdp_find_media_level(sdp_p, level);
+ if (mca_p != NULL) {
+ conn_p = &(mca_p->conn);
+ } else {
+ return (SDP_INVALID_VALUE);
+ }
+ }
+
+ if (conn_p) {
+ num_addr = conn_p->num_of_addresses;
+ }
+ return num_addr;
+}
+/* Function: sdp_set_conn_nettype
+ * Description: Sets the value of the network type parameter for the c=
+ * connection token line.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * nettype Network type for the connection line.
+ * level The level to check for the c= line. Will be
+ * either SDP_SESSION_LEVEL or 1-n specifying a
+ * media line level.
+ * Returns: SDP_SUCCESS or SDP_INVALID_PARAMETER
+ */
+sdp_result_e sdp_set_conn_nettype (sdp_t *sdp_p, uint16_t level,
+ sdp_nettype_e nettype)
+{
+ sdp_conn_t *conn_p;
+ sdp_mca_t *mca_p;
+
+ if (level == SDP_SESSION_LEVEL) {
+ conn_p = &(sdp_p->default_conn);
+ } else {
+ mca_p = sdp_find_media_level(sdp_p, level);
+ if (mca_p == NULL) {
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+ conn_p = &(mca_p->conn);
+ }
+
+ conn_p->nettype = nettype;
+ return (SDP_SUCCESS);
+}
+
+/* Function: sdp_set_conn_addrtype
+ * Description: Sets the value of the address type parameter for the c=
+ * connection token line.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * addrtype Address type for the connection line.
+ * level The level to check for the c= line. Will be
+ * either SDP_SESSION_LEVEL or 1-n specifying a
+ * media line level.
+ * Returns: SDP_SUCCESS or SDP_INVALID_PARAMETER
+ */
+sdp_result_e sdp_set_conn_addrtype (sdp_t *sdp_p, uint16_t level,
+ sdp_addrtype_e addrtype)
+{
+ sdp_conn_t *conn_p;
+ sdp_mca_t *mca_p;
+
+ if (level == SDP_SESSION_LEVEL) {
+ conn_p = &(sdp_p->default_conn);
+ } else {
+ mca_p = sdp_find_media_level(sdp_p, level);
+ if (mca_p == NULL) {
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+ conn_p = &(mca_p->conn);
+ }
+
+ conn_p->addrtype = addrtype;
+ return (SDP_SUCCESS);
+}
+
+/* Function: sdp_set_conn_address
+ * Description: Sets the value of the address parameter for the c=
+ * connection token line. The string is copied into the
+ * SDP structure so application memory will not be
+ * referenced by the SDP lib.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the c= line. Will be
+ * either SDP_SESSION_LEVEL or 1-n specifying a
+ * media line level.
+ * address Ptr to the address string.
+ * Returns: SDP_SUCCESS
+ */
+sdp_result_e sdp_set_conn_address (sdp_t *sdp_p, uint16_t level,
+ const char *address)
+{
+ sdp_conn_t *conn_p;
+ sdp_mca_t *mca_p;
+
+ if (level == SDP_SESSION_LEVEL) {
+ conn_p = &(sdp_p->default_conn);
+ } else {
+ mca_p = sdp_find_media_level(sdp_p, level);
+ if (mca_p == NULL) {
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+ conn_p = &(mca_p->conn);
+ }
+
+ sstrncpy(conn_p->conn_addr, address, sizeof(conn_p->conn_addr));
+ return (SDP_SUCCESS);
+}
+
+/* Function: sdp_media_line_valid
+ * Description: Returns true or false depending on whether the specified
+ * media line m= has been defined for this SDP. The
+ * SDP_SESSION_LEVEL level is not valid for this check since,
+ * by definition, this is a media level.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the c= line. Will be
+ * 1-n specifying a media line level.
+ * Returns: TRUE or FALSE.
+ */
+tinybool sdp_media_line_valid (sdp_t *sdp_p, uint16_t level)
+{
+ sdp_mca_t *mca_p;
+
+ mca_p = sdp_find_media_level(sdp_p, level);
+ if (mca_p == NULL) {
+ return (FALSE);
+ }
+
+ /* Validate params for this media line */
+ if ((mca_p->media >= SDP_MAX_MEDIA_TYPES) ||
+ (mca_p->port_format >= SDP_MAX_PORT_FORMAT_TYPES) ||
+ (mca_p->transport >= SDP_MAX_TRANSPORT_TYPES) ||
+ (mca_p->num_payloads == 0)) {
+ return (FALSE);
+ } else {
+ return (TRUE);
+ }
+}
+
+/* Function: sdp_get_num_media_lines
+ * Description: Returns the number of media lines associated with the SDP.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * Returns: Number of media lines.
+ */
+uint16_t sdp_get_num_media_lines (sdp_t *sdp_p)
+{
+ return (sdp_p->mca_count);
+}
+
+/* Function: sdp_get_media_type
+ * Description: Returns the media type parameter from the m=
+ * media token line. If media type has not been
+ * set SDP_MEDIA_INVALID will be returned.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to of the m= media line. Will be 1-n.
+ * Returns: Media type or SDP_MEDIA_INVALID.
+ */
+sdp_media_e sdp_get_media_type (sdp_t *sdp_p, uint16_t level)
+{
+ sdp_mca_t *mca_p;
+
+ mca_p = sdp_find_media_level(sdp_p, level);
+ if (mca_p == NULL) {
+ return (SDP_MEDIA_INVALID);
+ }
+
+ return (mca_p->media);
+}
+
+/* Function: sdp_get_media_line_number
+ * Description: Returns the line number in the SDP the media
+ * section starts on. Only set if SDP has been parsed
+ * (rather than built).
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to of the m= media line. Will be 1-n.
+ * Returns: Line number (0 if not found or if locally built)
+ */
+uint32_t sdp_get_media_line_number (sdp_t *sdp_p, uint16_t level)
+{
+ sdp_mca_t *mca_p;
+
+ mca_p = sdp_find_media_level(sdp_p, level);
+ if (mca_p == NULL) {
+ return 0;
+ }
+
+ return (mca_p->line_number);
+}
+
+/* Function: sdp_get_media_port_format
+ * Description: Returns the port format type associated with the m=
+ * media token line. If port format type has not been
+ * set SDP_PORT_FORMAT_INVALID will be returned.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to of the m= media line. Will be 1-n.
+ * Returns: Port format type or SDP_PORT_FORMAT_INVALID.
+ */
+sdp_port_format_e sdp_get_media_port_format (sdp_t *sdp_p, uint16_t level)
+{
+ sdp_mca_t *mca_p;
+
+ mca_p = sdp_find_media_level(sdp_p, level);
+ if (mca_p == NULL) {
+ return (SDP_PORT_FORMAT_INVALID);
+ }
+
+ return (mca_p->port_format);
+}
+
+/* Function: sdp_get_media_portnum
+ * Description: Returns the port number associated with the m=
+ * media token line. If port number has not been
+ * set SDP_INVALID_VALUE will be returned.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to of the m= media line. Will be 1-n.
+ * Returns: Port number or SDP_INVALID_VALUE.
+ */
+int32_t sdp_get_media_portnum (sdp_t *sdp_p, uint16_t level)
+{
+ sdp_mca_t *mca_p;
+
+ mca_p = sdp_find_media_level(sdp_p, level);
+ if (mca_p == NULL) {
+ return (SDP_INVALID_VALUE);
+ }
+
+ /* Make sure port number is valid for the specified format. */
+ if ((mca_p->port_format != SDP_PORT_NUM_ONLY) &&
+ (mca_p->port_format != SDP_PORT_NUM_COUNT) &&
+ (mca_p->port_format != SDP_PORT_NUM_VPI_VCI) &&
+ (mca_p->port_format != SDP_PORT_NUM_VPI_VCI_CID)) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s Port num not valid for media line %u",
+ sdp_p->debug_str, (unsigned)level);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_VALUE);
+ }
+
+ return (mca_p->port);
+}
+
+/* Function:    sdp_get_media_portcount
+ * Description: Returns the port count from the m= media token line.
+ *              Only the SDP_PORT_NUM_COUNT format carries a count; any
+ *              other format (or a bad level) yields SDP_INVALID_VALUE.
+ * Parameters:  sdp_p     The SDP handle returned by sdp_init_description.
+ *              level     The level of the m= media line.  Will be 1-n.
+ * Returns:     Port count or SDP_INVALID_VALUE.
+ */
+int32_t sdp_get_media_portcount (sdp_t *sdp_p, uint16_t level)
+{
+    sdp_mca_t *mca_p = sdp_find_media_level(sdp_p, level);
+
+    if (mca_p == NULL) {
+        return (SDP_INVALID_VALUE);
+    }
+
+    if (mca_p->port_format == SDP_PORT_NUM_COUNT) {
+        return (mca_p->num_ports);
+    }
+
+    if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+        CSFLogError(logTag, "%s Port count not valid for media line %u",
+                    sdp_p->debug_str, (unsigned)level);
+    }
+    sdp_p->conf_p->num_invalid_param++;
+    return (SDP_INVALID_VALUE);
+}
+
+/* Function:    sdp_get_media_vpi
+ * Description: Returns the VPI parameter from the m= media token line.
+ *              A VPI only exists for the VPI/VCI port formats; any other
+ *              format (or a bad level) yields SDP_INVALID_VALUE.
+ * Parameters:  sdp_p     The SDP handle returned by sdp_init_description.
+ *              level     The level of the m= media line.  Will be 1-n.
+ * Returns:     VPI or SDP_INVALID_VALUE.
+ */
+int32_t sdp_get_media_vpi (sdp_t *sdp_p, uint16_t level)
+{
+    sdp_mca_t *mca_p = sdp_find_media_level(sdp_p, level);
+
+    if (mca_p == NULL) {
+        return (SDP_INVALID_VALUE);
+    }
+
+    /* Only port formats that include a VPI may report one. */
+    switch (mca_p->port_format) {
+    case SDP_PORT_VPI_VCI:
+    case SDP_PORT_NUM_VPI_VCI:
+    case SDP_PORT_NUM_VPI_VCI_CID:
+        return (mca_p->vpi);
+    default:
+        if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+            CSFLogError(logTag, "%s VPI not valid for media line %u",
+                        sdp_p->debug_str, (unsigned)level);
+        }
+        sdp_p->conf_p->num_invalid_param++;
+        return (SDP_INVALID_VALUE);
+    }
+}
+
+/* Function:    sdp_get_media_vci
+ * Description: Returns the VCI parameter associated with the m=
+ *              media token line.  If the VCI has not been set, or the
+ *              port format does not include a VCI, zero is returned.
+ *              (Note: unlike the sibling accessors this returns an
+ *              unsigned value, so zero -- not SDP_INVALID_VALUE -- is
+ *              the failure sentinel.)
+ * Parameters:  sdp_p     The SDP handle returned by sdp_init_description.
+ *              level     The level to of the m= media line.  Will be 1-n.
+ * Returns:     VCI or zero.
+ */
+uint32_t sdp_get_media_vci (sdp_t *sdp_p, uint16_t level)
+{
+    sdp_mca_t *mca_p;
+
+    mca_p = sdp_find_media_level(sdp_p, level);
+    if (mca_p == NULL) {
+        return (0);
+    }
+
+    /* A VCI is only present in the VPI/VCI port formats. */
+    if ((mca_p->port_format != SDP_PORT_VPI_VCI) &&
+        (mca_p->port_format != SDP_PORT_NUM_VPI_VCI) &&
+        (mca_p->port_format != SDP_PORT_NUM_VPI_VCI_CID)) {
+        if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+            CSFLogError(logTag, "%s VCI not valid for media line %u",
+                        sdp_p->debug_str, (unsigned)level);
+        }
+        sdp_p->conf_p->num_invalid_param++;
+        return (0);
+    }
+
+    return (mca_p->vci);
+}
+
+/* Function:    sdp_get_media_vcci
+ * Description: Returns the VCCI parameter from the m= media token line.
+ *              A VCCI only exists for the VCCI port formats; any other
+ *              format (or a bad level) yields SDP_INVALID_VALUE.
+ * Parameters:  sdp_p     The SDP handle returned by sdp_init_description.
+ *              level     The level of the m= media line.  Will be 1-n.
+ * Returns:     VCCI or SDP_INVALID_VALUE.
+ */
+int32_t sdp_get_media_vcci (sdp_t *sdp_p, uint16_t level)
+{
+    sdp_mca_t *mca_p = sdp_find_media_level(sdp_p, level);
+
+    if (mca_p == NULL) {
+        return (SDP_INVALID_VALUE);
+    }
+
+    /* Only the VCCI port formats carry a VCCI value. */
+    if ((mca_p->port_format == SDP_PORT_VCCI) ||
+        (mca_p->port_format == SDP_PORT_VCCI_CID)) {
+        return (mca_p->vcci);
+    }
+
+    if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+        CSFLogError(logTag, "%s VCCI not valid for media line %u",
+                    sdp_p->debug_str, (unsigned)level);
+    }
+    sdp_p->conf_p->num_invalid_param++;
+    return (SDP_INVALID_VALUE);
+}
+
+/* Function:    sdp_get_media_cid
+ * Description: Returns the CID parameter from the m= media token line.
+ *              A CID only exists for the *_CID port formats; any other
+ *              format (or a bad level) yields SDP_INVALID_VALUE.
+ * Parameters:  sdp_p     The SDP handle returned by sdp_init_description.
+ *              level     The level of the m= media line.  Will be 1-n.
+ * Returns:     CID or SDP_INVALID_VALUE.
+ */
+int32_t sdp_get_media_cid (sdp_t *sdp_p, uint16_t level)
+{
+    sdp_mca_t *mca_p = sdp_find_media_level(sdp_p, level);
+
+    if (mca_p == NULL) {
+        return (SDP_INVALID_VALUE);
+    }
+
+    /* Only port formats ending in CID carry a CID value. */
+    if ((mca_p->port_format == SDP_PORT_VCCI_CID) ||
+        (mca_p->port_format == SDP_PORT_NUM_VPI_VCI_CID)) {
+        return (mca_p->cid);
+    }
+
+    if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+        CSFLogError(logTag, "%s CID not valid for media line %u",
+                    sdp_p->debug_str, (unsigned)level);
+    }
+    sdp_p->conf_p->num_invalid_param++;
+    return (SDP_INVALID_VALUE);
+}
+
+/* Function:    sdp_get_media_transport
+ * Description: Returns the transport type parameter associated with the m=
+ *              media token line.  If transport type has not been set
+ *              SDP_TRANSPORT_INVALID will be returned.  If the transport
+ *              type is one of the AAL2 variants, the profile routines below
+ *              should be used to access multiple profile types and payload
+ *              lists per m= line.
+ * Parameters:  sdp_p     The SDP handle returned by sdp_init_description.
+ *              level     The level of the m= media line.  Will be 1-n.
+ * Returns:     Transport type or SDP_TRANSPORT_INVALID.
+ */
+sdp_transport_e sdp_get_media_transport (sdp_t *sdp_p, uint16_t level)
+{
+    sdp_mca_t *mca_p = sdp_find_media_level(sdp_p, level);
+
+    if (mca_p == NULL) {
+        return (SDP_TRANSPORT_INVALID);
+    }
+    return (mca_p->transport);
+}
+
+/* Function:    sdp_get_media_num_profiles
+ * Description: Returns the number of profiles associated with the m=
+ *              media token line, or zero if the media line is invalid or
+ *              has no profile list.  Multiple profile types per media line
+ *              is currently only used for AAL2: when the transport type is
+ *              one of the AAL2 types, use the profile access routines to
+ *              read the profile types and payload list for each.
+ * Parameters:  sdp_p     The SDP handle returned by sdp_init_description.
+ *              level     The level of the m= media line.  Will be 1-n.
+ * Returns:     Number of profiles or zero.
+ */
+uint16_t sdp_get_media_num_profiles (sdp_t *sdp_p, uint16_t level)
+{
+    sdp_mca_t *mca_p = sdp_find_media_level(sdp_p, level);
+
+    /* No media line, or a media line without an AAL2 profile list,
+     * has zero profiles. */
+    if ((mca_p == NULL) || (mca_p->media_profiles_p == NULL)) {
+        return (0);
+    }
+    return (mca_p->media_profiles_p->num_profiles);
+}
+
+/* Function:    sdp_get_media_profile
+ * Description: Returns the specified profile type associated with the m=
+ *              media token line.  If the media line, profile list, or
+ *              profile number is invalid, SDP_TRANSPORT_INVALID will be
+ *              returned.
+ * Parameters:  sdp_p       The SDP handle returned by sdp_init_description.
+ *              level       The level of the m= media line.  Will be 1-n.
+ *              profile_num The specific profile type number to be
+ *                          retrieved.  Will be 1-n.
+ * Returns:     The profile type or SDP_TRANSPORT_INVALID.
+ */
+sdp_transport_e sdp_get_media_profile (sdp_t *sdp_p, uint16_t level,
+                                       uint16_t profile_num)
+{
+    sdp_mca_t *mca_p;
+
+    mca_p = sdp_find_media_level(sdp_p, level);
+    if (mca_p == NULL) {
+        return (SDP_TRANSPORT_INVALID);
+    }
+
+    /* Fix: a media line with no AAL2 profile list was previously
+     * dereferenced unconditionally here (NULL deref); treat it the same
+     * as an invalid profile number, matching sdp_get_media_num_profiles. */
+    if (mca_p->media_profiles_p == NULL) {
+        return (SDP_TRANSPORT_INVALID);
+    }
+
+    if ((profile_num < 1) ||
+        (profile_num > mca_p->media_profiles_p->num_profiles)) {
+        return (SDP_TRANSPORT_INVALID);
+    }
+    return (mca_p->media_profiles_p->profile[profile_num-1]);
+}
+
+/* Function:    sdp_get_media_num_payload_types
+ * Description: Returns the number of payload types associated with the m=
+ *              media token line, or zero if the media line is invalid.
+ *              The application must validate the media line before using
+ *              this routine.
+ * Parameters:  sdp_p     The SDP handle returned by sdp_init_description.
+ *              level     The level of the m= media line.  Will be 1-n.
+ * Returns:     Number of payload types or zero.
+ */
+uint16_t sdp_get_media_num_payload_types (sdp_t *sdp_p, uint16_t level)
+{
+    sdp_mca_t *mca_p = sdp_find_media_level(sdp_p, level);
+
+    return (mca_p == NULL) ? 0 : mca_p->num_payloads;
+}
+
+/* Function:    sdp_get_media_profile_num_payload_types
+ * Description: Returns the number of payload types associated with the
+ *              specified profile on the m= media token line.  If the
+ *              media line, profile list, or profile number is invalid,
+ *              zero will be returned.
+ * Parameters:  sdp_p       The SDP handle returned by sdp_init_description.
+ *              level       The level of the m= media line.  Will be 1-n.
+ *              profile_num The specific profile number.  Will be 1-n.
+ * Returns:     Number of payload types or zero.
+ */
+uint16_t sdp_get_media_profile_num_payload_types (sdp_t *sdp_p, uint16_t level,
+                                                  uint16_t profile_num)
+{
+    sdp_mca_t *mca_p;
+
+    mca_p = sdp_find_media_level(sdp_p, level);
+    if (mca_p == NULL) {
+        return (0);
+    }
+
+    /* Fix: the profile list pointer was previously dereferenced without
+     * a NULL check; a media line without profiles has zero payloads. */
+    if (mca_p->media_profiles_p == NULL) {
+        return (0);
+    }
+
+    if ((profile_num < 1) ||
+        (profile_num > mca_p->media_profiles_p->num_profiles)) {
+        return (0);
+    }
+    return (mca_p->media_profiles_p->num_payloads[profile_num-1]);
+}
+
+/* Maps a raw payload number from the m= line to a known codec type by
+ * scanning the media line's rtpmap attributes.  Returns RTP_NONE if the
+ * payload number has no rtpmap, the encoding name is unknown, or an
+ * rtpmap instance is unexpectedly missing. */
+rtp_ptype sdp_get_known_payload_type(sdp_t *sdp_p,
+                                     uint16_t level,
+                                     uint16_t payload_type_raw) {
+    /* Direct encoding-name -> codec mappings.  H.264 is handled
+     * separately below because its codec type depends on the fmtp
+     * packetization-mode parameter. */
+    static const struct {
+        const char *encname;
+        rtp_ptype   ptype;
+    } known_encnames[] = {
+        { SIPSDP_ATTR_ENCNAME_ILBC,            RTP_ILBC },
+        { SIPSDP_ATTR_ENCNAME_L16_256K,        RTP_L16 },
+        { SIPSDP_ATTR_ENCNAME_ISAC,            RTP_ISAC },
+        { SIPSDP_ATTR_ENCNAME_OPUS,            RTP_OPUS },
+        { SIPSDP_ATTR_ENCNAME_PCMU,            RTP_PCMU },
+        { SIPSDP_ATTR_ENCNAME_PCMA,            RTP_PCMA },
+        { SIPSDP_ATTR_ENCNAME_G722,            RTP_G722 },
+        { SIPSDP_ATTR_ENCNAME_VP8,             RTP_VP8 },
+        { SIPSDP_ATTR_ENCNAME_VP9,             RTP_VP9 },
+        { SIPSDP_ATTR_ENCNAME_RED,             RTP_RED },
+        { SIPSDP_ATTR_ENCNAME_ULPFEC,          RTP_ULPFEC },
+        { SIPSDP_ATTR_ENCNAME_TELEPHONE_EVENT, RTP_TELEPHONE_EVENT },
+    };
+    sdp_attr_t *attr_p;
+    sdp_transport_map_t *rtpmap;
+    uint16_t pack_mode = 0; /* default 0, if remote did not provide any */
+    const char *encname;
+    uint16_t num_a_lines = 0;
+    int i;
+    size_t j;
+
+    /* Count the rtpmap attributes present on this media line. */
+    (void) sdp_attr_num_instances(sdp_p, level, 0, SDP_ATTR_RTPMAP,
+                                  &num_a_lines);
+
+    for (i = 0; i < num_a_lines; i++) {
+        attr_p = sdp_find_attr(sdp_p, level, 0, SDP_ATTR_RTPMAP, (i + 1));
+        if (attr_p == NULL) {
+            /* The count said this instance exists; treat a miss as an
+             * internal inconsistency and bail out. */
+            if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+                CSFLogError(logTag, "%s rtpmap attribute, level %u instance %u "
+                            "not found.",
+                            sdp_p->debug_str,
+                            (unsigned)level,
+                            (unsigned)(i + 1));
+            }
+            sdp_p->conf_p->num_invalid_param++;
+            return (RTP_NONE);
+        }
+
+        rtpmap = &(attr_p->attr.transport_map);
+        if (rtpmap->payload_num != payload_type_raw) {
+            continue;
+        }
+        encname = rtpmap->encname;
+        if (!encname) {
+            continue;
+        }
+
+        /* H.264: packetization-mode 0 vs 1 map to different codec types;
+         * absent fmtp implies the default mode (0). */
+        if (cpr_strcasecmp(encname, SIPSDP_ATTR_ENCNAME_H264) == 0) {
+            int fmtp_inst = sdp_find_fmtp_inst(sdp_p, level, rtpmap->payload_num);
+            if (fmtp_inst < 0) {
+                return (RTP_H264_P0);
+            }
+            sdp_attr_get_fmtp_pack_mode(sdp_p, level, 0, (uint16_t) fmtp_inst, &pack_mode);
+            return (pack_mode == SDP_DEFAULT_PACKETIZATION_MODE_VALUE) ?
+                RTP_H264_P0 : RTP_H264_P1;
+        }
+
+        /* All remaining known codecs are a straight name lookup. */
+        for (j = 0; j < sizeof(known_encnames) / sizeof(known_encnames[0]); j++) {
+            if (cpr_strcasecmp(encname, known_encnames[j].encname) == 0) {
+                return (known_encnames[j].ptype);
+            }
+        }
+    }
+
+    return (RTP_NONE);
+}
+
+/* Function:    sdp_get_media_payload_type
+ * Description: Returns the payload type of the specified payload for the m=
+ *              media token line.  If the media line or payload number is
+ *              invalid, zero will be returned and *indicator is left
+ *              untouched.  For dynamic payload numbers that map to a known
+ *              codec, the returned value also encodes the codec type.
+ * Parameters:  sdp_p       The SDP handle returned by sdp_init_description.
+ *              level       The level of the m= media line.  Will be 1-n.
+ *              payload_num Number of the payload type to retrieve.  The
+ *                          range is (1 - max num payloads).
+ *              indicator   Returns the type of payload returned, either
+ *                          NUMERIC or ENUM.
+ * Returns:     Payload type or zero.
+ */
+uint32_t sdp_get_media_payload_type (sdp_t *sdp_p, uint16_t level, uint16_t payload_num,
+                                     sdp_payload_ind_e *indicator)
+{
+    sdp_mca_t *mca_p;
+    rtp_ptype ptype;
+
+    mca_p = sdp_find_media_level(sdp_p, level);
+    if ((mca_p == NULL) ||
+        (payload_num < 1) || (payload_num > mca_p->num_payloads)) {
+        return (0);
+    }
+
+    *indicator = mca_p->payload_indicator[payload_num-1];
+
+    /* Static (non-dynamic) payload numbers are returned as-is. */
+    if ((mca_p->payload_type[payload_num-1] < SDP_MIN_DYNAMIC_PAYLOAD) ||
+        (mca_p->payload_type[payload_num-1] > SDP_MAX_DYNAMIC_PAYLOAD)) {
+        return (mca_p->payload_type[payload_num-1]);
+    }
+
+    /* Dynamic payloads: if the rtpmap resolves to a known codec, fold the
+     * codec type into the returned value. */
+    ptype = sdp_get_known_payload_type(sdp_p,
+                                       level,
+                                       mca_p->payload_type[payload_num-1]);
+    if (ptype == RTP_NONE) {
+        return (mca_p->payload_type[payload_num-1]);
+    }
+    return (SET_PAYLOAD_TYPE_WITH_DYNAMIC(
+        mca_p->payload_type[payload_num-1], ptype));
+}
+
+/* Function:    sdp_get_media_profile_payload_type
+ * Description: Returns the payload type of the specified payload of the
+ *              given profile for the m= media token line.  If the media
+ *              line, profile list, profile number, or payload number is
+ *              invalid, zero will be returned and *indicator is left
+ *              untouched.
+ * Parameters:  sdp_p       The SDP handle returned by sdp_init_description.
+ *              level       The level of the m= media line.  Will be 1-n.
+ *              prof_num    The profile number.  Will be 1-n.
+ *              payload_num Number of the payload type to retrieve.  The
+ *                          range is (1 - max num payloads).
+ *              indicator   Returns the type of payload returned, either
+ *                          NUMERIC or ENUM.
+ * Returns:     Payload type or zero.
+ */
+uint32_t sdp_get_media_profile_payload_type (sdp_t *sdp_p, uint16_t level, uint16_t prof_num,
+                                             uint16_t payload_num,
+                                             sdp_payload_ind_e *indicator)
+{
+    sdp_mca_t *mca_p;
+    sdp_media_profiles_t *prof_p;
+
+    mca_p = sdp_find_media_level(sdp_p, level);
+    if (mca_p == NULL) {
+        return (0);
+    }
+
+    prof_p = mca_p->media_profiles_p;
+    /* Fix: prof_p was previously dereferenced without a NULL check; a
+     * media line with no profile list has no profile payload types. */
+    if (prof_p == NULL) {
+        return (0);
+    }
+
+    if ((prof_num < 1) ||
+        (prof_num > prof_p->num_profiles)) {
+        return (0);
+    }
+
+    if ((payload_num < 1) ||
+        (payload_num > prof_p->num_payloads[prof_num-1])) {
+        return (0);
+    }
+
+    *indicator = prof_p->payload_indicator[prof_num-1][payload_num-1];
+    return (prof_p->payload_type[prof_num-1][payload_num-1]);
+}
+
+/* Function:    sdp_insert_media_line
+ * Description: Insert a new, empty media line at the level specified for
+ *              the given SDP.  Existing media lines at and after that
+ *              level shift down by one.
+ * Parameters:  sdp_p     The SDP handle returned by sdp_init_description.
+ *              level     The new media level to insert.  Will be 1-n.
+ * Returns:     SDP_SUCCESS, SDP_NO_RESOURCE, or SDP_INVALID_PARAMETER
+ */
+sdp_result_e sdp_insert_media_line (sdp_t *sdp_p, uint16_t level)
+{
+    sdp_mca_t *prev_p;
+    sdp_mca_t *new_mca_p;
+
+    /* Valid insertion points are 1 through (current count + 1). */
+    if ((level < 1) || (level > (sdp_p->mca_count+1))) {
+        if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+            CSFLogError(logTag, "%s Invalid media line (%u) to insert, max is "
+                        "(%u).", sdp_p->debug_str, (unsigned)level, (unsigned)sdp_p->mca_count);
+        }
+        sdp_p->conf_p->num_invalid_param++;
+        return (SDP_INVALID_PARAMETER);
+    }
+
+    new_mca_p = sdp_alloc_mca(0);
+    if (new_mca_p == NULL) {
+        sdp_p->conf_p->num_no_resource++;
+        return (SDP_NO_RESOURCE);
+    }
+
+    if (level == 1) {
+        /* New head of the media-line list. */
+        new_mca_p->next_p = sdp_p->mca_p;
+        sdp_p->mca_p = new_mca_p;
+    } else {
+        /* Splice in after the media line immediately preceding the
+         * requested level. */
+        prev_p = sdp_find_media_level(sdp_p, (uint16_t)(level-1));
+        if (prev_p == NULL) {
+            SDP_FREE(new_mca_p);
+            sdp_p->conf_p->num_invalid_param++;
+            return (SDP_INVALID_PARAMETER);
+        }
+        new_mca_p->next_p = prev_p->next_p;
+        prev_p->next_p = new_mca_p;
+    }
+
+    sdp_p->mca_count++;
+    return (SDP_SUCCESS);
+}
+
+/* Function:    sdp_set_media_type
+ * Description: Sets the media type parameter for the m= media token line.
+ * Parameters:  sdp_p     The SDP handle returned by sdp_init_description.
+ *              level     The media level to set the param.  Will be 1-n.
+ *              media     Media type for the media line.
+ * Returns:     SDP_SUCCESS or SDP_INVALID_PARAMETER
+ */
+sdp_result_e sdp_set_media_type (sdp_t *sdp_p, uint16_t level, sdp_media_e media)
+{
+    sdp_mca_t *mca_p = sdp_find_media_level(sdp_p, level);
+
+    if (mca_p == NULL) {
+        sdp_p->conf_p->num_invalid_param++;
+        return (SDP_INVALID_PARAMETER);
+    }
+    mca_p->media = media;
+    return (SDP_SUCCESS);
+}
+
+/* Function:    sdp_set_media_portnum
+ * Description: Sets the port number (and sctp port) for the m= media
+ *              token line.
+ * Parameters:  sdp_p     The SDP handle returned by sdp_init_description.
+ *              level     The media level to set the param.  Will be 1-n.
+ *              portnum   Port number to set.
+ *              sctp_port sctp port for application m= line.
+ * Returns:     SDP_SUCCESS or SDP_INVALID_PARAMETER
+ */
+sdp_result_e sdp_set_media_portnum (sdp_t *sdp_p, uint16_t level, int32_t portnum, int32_t sctp_port)
+{
+    sdp_mca_t *mca_p = sdp_find_media_level(sdp_p, level);
+
+    if (mca_p == NULL) {
+        sdp_p->conf_p->num_invalid_param++;
+        return (SDP_INVALID_PARAMETER);
+    }
+    mca_p->port = portnum;
+    mca_p->sctpport = sctp_port;
+    return (SDP_SUCCESS);
+}
+
+/* Function:    sdp_get_media_sctp_port
+ * Description: Gets the sctp port number for the m= media token line.
+ * Parameters:  sdp_p     The SDP handle returned by sdp_init_description.
+ *              level     The media level to query.  Will be 1-n.
+ * Returns:     sctp_port or -1 on failure
+ */
+int32_t sdp_get_media_sctp_port(sdp_t *sdp_p, uint16_t level)
+{
+    sdp_mca_t *mca_p = sdp_find_media_level(sdp_p, level);
+
+    if (mca_p == NULL) {
+        sdp_p->conf_p->num_invalid_param++;
+        return -1;
+    }
+    return mca_p->sctpport;
+}
+
+/* Function:    sdp_set_media_transport
+ * Description: Sets the transport type parameter for the m= media token
+ *              line.
+ * Parameters:  sdp_p     The SDP handle returned by sdp_init_description.
+ *              level     The media level to set the param.  Will be 1-n.
+ *              transport The transport type to set.
+ * Returns:     SDP_SUCCESS or SDP_INVALID_PARAMETER
+ */
+sdp_result_e sdp_set_media_transport (sdp_t *sdp_p, uint16_t level,
+                                      sdp_transport_e transport)
+{
+    sdp_mca_t *mca_p = sdp_find_media_level(sdp_p, level);
+
+    if (mca_p == NULL) {
+        sdp_p->conf_p->num_invalid_param++;
+        return (SDP_INVALID_PARAMETER);
+    }
+    mca_p->transport = transport;
+    return (SDP_SUCCESS);
+}
+
+/* Function:    sdp_add_media_profile
+ * Description: Add a new profile type for the m= media token line.  This is
+ *              used for AAL2 transport/profile types where more than one can
+ *              be specified per media line.  All other transport types should
+ *              use the other transport access routines rather than this.
+ * Parameters:  sdp_p     The SDP handle returned by sdp_init_description.
+ *              level     The media level to add the param.  Will be 1-n.
+ *              profile   The profile type to add.
+ * Returns:     SDP_SUCCESS, SDP_NO_RESOURCE, or SDP_INVALID_PARAMETER
+ */
+sdp_result_e sdp_add_media_profile (sdp_t *sdp_p, uint16_t level,
+                                    sdp_transport_e profile)
+{
+    sdp_mca_t *mca_p;
+    sdp_media_profiles_t *profs_p;
+    uint16_t slot;
+
+    mca_p = sdp_find_media_level(sdp_p, level);
+    if (mca_p == NULL) {
+        sdp_p->conf_p->num_invalid_param++;
+        return (SDP_INVALID_PARAMETER);
+    }
+
+    /* Lazily allocate the profile list the first time a profile is added. */
+    if (mca_p->media_profiles_p == NULL) {
+        profs_p = (sdp_media_profiles_t *)
+                  SDP_MALLOC(sizeof(sdp_media_profiles_t));
+        if (profs_p == NULL) {
+            sdp_p->conf_p->num_no_resource++;
+            return (SDP_NO_RESOURCE);
+        }
+        profs_p->num_profiles = 0;
+        /* The first profile type also becomes the media line transport. */
+        mca_p->transport = profile;
+        mca_p->media_profiles_p = profs_p;
+    }
+
+    profs_p = mca_p->media_profiles_p;
+    if (profs_p->num_profiles >= SDP_MAX_PROFILES) {
+        if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+            CSFLogError(logTag, "%s Max number of media profiles already specified"
+                        " for media level %u", sdp_p->debug_str, (unsigned)level);
+        }
+        sdp_p->conf_p->num_invalid_param++;
+        return (SDP_INVALID_PARAMETER);
+    }
+
+    slot = profs_p->num_profiles++;
+    profs_p->profile[slot] = profile;
+    profs_p->num_payloads[slot] = 0;
+    return (SDP_SUCCESS);
+}
+
+/* Function:    sdp_add_media_payload_type
+ * Description: Add a new payload type for the media line at the level
+ *              specified.  The new payload type will be added at the end
+ *              of the payload type list.
+ * Parameters:  sdp_p        The SDP handle returned by sdp_init_description.
+ *              level        The media level to add the payload.  Will be 1-n.
+ *              payload_type The new payload type.
+ *              indicator    Defines the type of payload, either NUMERIC
+ *                           or ENUM.
+ * Returns:     SDP_SUCCESS or SDP_INVALID_PARAMETER
+ */
+sdp_result_e sdp_add_media_payload_type (sdp_t *sdp_p, uint16_t level,
+                                         uint16_t payload_type,
+                                         sdp_payload_ind_e indicator)
+{
+    sdp_mca_t *mca_p;
+
+    mca_p = sdp_find_media_level(sdp_p, level);
+    if (mca_p == NULL) {
+        sdp_p->conf_p->num_invalid_param++;
+        return (SDP_INVALID_PARAMETER);
+    }
+
+    /* Hardened from == to >= so an already-out-of-range count can never
+     * walk past the end of the fixed-size payload arrays. */
+    if (mca_p->num_payloads >= SDP_MAX_PAYLOAD_TYPES) {
+        if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+            CSFLogError(logTag, "%s Max number of payload types already defined "
+                        "for media line %u", sdp_p->debug_str, (unsigned)level);
+        }
+        sdp_p->conf_p->num_invalid_param++;
+        return (SDP_INVALID_PARAMETER);
+    }
+
+    mca_p->payload_indicator[mca_p->num_payloads] = indicator;
+    mca_p->payload_type[mca_p->num_payloads++] = payload_type;
+    return (SDP_SUCCESS);
+}
+
+/* Function:    sdp_add_media_profile_payload_type
+ * Description: Add a new payload type to the given profile of the media
+ *              line at the level specified.  The new payload type will be
+ *              added at the end of that profile's payload type list.
+ * Parameters:  sdp_p        The SDP handle returned by sdp_init_description.
+ *              level        The media level to add the payload.  Will be 1-n.
+ *              prof_num     The profile number to add the payload type.
+ *              payload_type The new payload type.
+ *              indicator    Defines the type of payload, either NUMERIC
+ *                           or ENUM.
+ * Returns:     SDP_SUCCESS or SDP_INVALID_PARAMETER
+ */
+sdp_result_e sdp_add_media_profile_payload_type (sdp_t *sdp_p, uint16_t level,
+                                                 uint16_t prof_num, uint16_t payload_type,
+                                                 sdp_payload_ind_e indicator)
+{
+    uint16_t num_payloads;
+    sdp_mca_t *mca_p;
+
+    mca_p = sdp_find_media_level(sdp_p, level);
+    if (mca_p == NULL) {
+        sdp_p->conf_p->num_invalid_param++;
+        return (SDP_INVALID_PARAMETER);
+    }
+
+    /* Fix: also reject a media line with no profile list at all; the
+     * original dereferenced media_profiles_p without a NULL check. */
+    if ((mca_p->media_profiles_p == NULL) ||
+        (prof_num < 1) ||
+        (prof_num > mca_p->media_profiles_p->num_profiles)) {
+        if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+            /* Fix: log the offending profile number (was printing level). */
+            CSFLogError(logTag, "%s Invalid profile number (%u) for set profile "
+                        " payload type", sdp_p->debug_str, (unsigned)prof_num);
+        }
+        sdp_p->conf_p->num_invalid_param++;
+        return (SDP_INVALID_PARAMETER);
+    }
+
+    /* Hardened from == to >= so a corrupt count cannot overflow the
+     * fixed-size per-profile payload arrays. */
+    if (mca_p->media_profiles_p->num_payloads[prof_num-1] >=
+        SDP_MAX_PAYLOAD_TYPES) {
+        if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+            CSFLogError(logTag, "%s Max number of profile payload types already "
+                        "defined profile %u on media line %u",
+                        sdp_p->debug_str, (unsigned)prof_num, (unsigned)level);
+        }
+        sdp_p->conf_p->num_invalid_param++;
+        return (SDP_INVALID_PARAMETER);
+    }
+
+    /* Get the current num payloads for this profile, and inc the number
+     * of payloads at the same time.  Then store the new payload type. */
+    num_payloads = mca_p->media_profiles_p->num_payloads[prof_num-1]++;
+    mca_p->media_profiles_p->payload_indicator[prof_num-1][num_payloads] =
+        indicator;
+    mca_p->media_profiles_p->payload_type[prof_num-1][num_payloads] =
+        payload_type;
+    return (SDP_SUCCESS);
+}
+
+/*
+ * sdp_find_bw_line
+ *
+ * Locates the inst_num'th (1-based) bw line at the given level of the
+ * sdp (SDP_SESSION_LEVEL or a media level).
+ *
+ * Returns: Pointer to the sdp_bw_data_t instance, or NULL.
+ */
+sdp_bw_data_t* sdp_find_bw_line (sdp_t *sdp_p, uint16_t level, uint16_t inst_num)
+{
+    sdp_bw_t *bw_p;
+    sdp_bw_data_t *bw_data_p;
+    sdp_mca_t *mca_p;
+    int seen = 0;
+
+    /* Select the session-level or media-level bw list. */
+    if (level == SDP_SESSION_LEVEL) {
+        bw_p = &(sdp_p->bw);
+    } else {
+        mca_p = sdp_find_media_level(sdp_p, level);
+        if (mca_p == NULL) {
+            sdp_p->conf_p->num_invalid_param++;
+            return (NULL);
+        }
+        bw_p = &(mca_p->bw);
+    }
+
+    for (bw_data_p = bw_p->bw_data_list;
+         bw_data_p != NULL;
+         bw_data_p = bw_data_p->next_p) {
+        if (++seen == inst_num) {
+            return bw_data_p;
+        }
+    }
+
+    return NULL;
+}
+
+/*
+ * sdp_copy_all_bw_lines
+ *
+ * Appends all the bw lines from the specified level of the orig sdp to the
+ * specified level of the dst sdp.
+ *
+ * Parameters: src_sdp_p  The source SDP handle.
+ *             dst_sdp_p  The dest SDP handle.
+ *             src_level  The level in the src sdp from where to get the
+ *                        attributes.
+ *             dst_level  The level in the dst sdp where to put the
+ *                        attributes.
+ * Returns: SDP_SUCCESS            Attributes were successfully copied.
+ *          SDP_INVALID_PARAMETER  A media level could not be found.
+ *          SDP_NO_RESOURCE        Allocation failed (already-copied lines
+ *                                 remain attached to the dst sdp).
+ */
+sdp_result_e sdp_copy_all_bw_lines (sdp_t *src_sdp_p, sdp_t *dst_sdp_p,
+                                    uint16_t src_level, uint16_t dst_level)
+{
+    sdp_bw_data_t *orig_bw_data_p;
+    sdp_bw_data_t *new_bw_data_p;
+    sdp_bw_data_t *tail_p;
+    sdp_bw_t *src_bw_p;
+    sdp_bw_t *dst_bw_p;
+    sdp_mca_t *mca_p;
+
+    /* Find src bw list */
+    if (src_level == SDP_SESSION_LEVEL) {
+        src_bw_p = &(src_sdp_p->bw);
+    } else {
+        mca_p = sdp_find_media_level(src_sdp_p, src_level);
+        if (mca_p == NULL) {
+            if (src_sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+                CSFLogError(logTag, "%s Invalid src media level (%u) for copy all "
+                            "attrs ", src_sdp_p->debug_str, (unsigned)src_level);
+            }
+            return (SDP_INVALID_PARAMETER);
+        }
+        src_bw_p = &(mca_p->bw);
+    }
+
+    /* Find dst bw list */
+    if (dst_level == SDP_SESSION_LEVEL) {
+        dst_bw_p = &(dst_sdp_p->bw);
+    } else {
+        mca_p = sdp_find_media_level(dst_sdp_p, dst_level);
+        if (mca_p == NULL) {
+            if (src_sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+                CSFLogError(logTag, "%s Invalid dst media level (%u) for copy all "
+                            "attrs ", src_sdp_p->debug_str, (unsigned)dst_level);
+            }
+            return (SDP_INVALID_PARAMETER);
+        }
+        dst_bw_p = &(mca_p->bw);
+    }
+
+    /* Locate the current tail of the dst list once, instead of re-walking
+     * the whole list for every copied line (was O(n^2)). */
+    tail_p = dst_bw_p->bw_data_list;
+    while ((tail_p != NULL) && (tail_p->next_p != NULL)) {
+        tail_p = tail_p->next_p;
+    }
+
+    /* For every bw line in the src, allocate and append a copy in the dst. */
+    for (orig_bw_data_p = src_bw_p->bw_data_list;
+         orig_bw_data_p != NULL;
+         orig_bw_data_p = orig_bw_data_p->next_p) {
+        new_bw_data_p = (sdp_bw_data_t*)SDP_MALLOC(sizeof(sdp_bw_data_t));
+        if (new_bw_data_p == NULL) {
+            return (SDP_NO_RESOURCE);
+        }
+        new_bw_data_p->next_p = NULL;
+        new_bw_data_p->bw_modifier = orig_bw_data_p->bw_modifier;
+        new_bw_data_p->bw_val = orig_bw_data_p->bw_val;
+
+        if (tail_p == NULL) {
+            dst_bw_p->bw_data_list = new_bw_data_p;
+        } else {
+            tail_p->next_p = new_bw_data_p;
+        }
+        tail_p = new_bw_data_p;
+        dst_bw_p->bw_data_count++;
+    }
+
+    return (SDP_SUCCESS);
+}
+
+/* Function:    sdp_get_bw_modifier
+ * Description: Returns the bandwidth modifier parameter from the b= line.
+ *              If no bw line exists at that level/instance,
+ *              SDP_BW_MODIFIER_UNSUPPORTED is returned.
+ * Parameters:  sdp_p     The SDP handle returned by sdp_init_description.
+ *              level     The level from which to get the bw modifier.
+ *              inst_num  Instance number of the bw line at that level,
+ *                        1-based.
+ * Returns:     Valid modifier value or SDP_BW_MODIFIER_UNSUPPORTED.
+ */
+sdp_bw_modifier_e sdp_get_bw_modifier (sdp_t *sdp_p, uint16_t level, uint16_t inst_num)
+{
+    const sdp_bw_data_t *bw_data_p = sdp_find_bw_line(sdp_p, level, inst_num);
+
+    return bw_data_p ? bw_data_p->bw_modifier : SDP_BW_MODIFIER_UNSUPPORTED;
+}
+
+/* Function:    sdp_get_bw_value
+ * Description: Returns the bandwidth value parameter from the b= line.
+ * Parameters:  sdp_p     The SDP handle returned by sdp_init_description.
+ *              level     The level from which to get the bw value.
+ *              inst_num  Instance number of the bw line at the level,
+ *                        1-based.
+ * Returns:     A valid numerical bw value or SDP_INVALID_VALUE.
+ */
+int32_t sdp_get_bw_value (sdp_t *sdp_p, uint16_t level, uint16_t inst_num)
+{
+    const sdp_bw_data_t *bw_data_p = sdp_find_bw_line(sdp_p, level, inst_num);
+
+    return bw_data_p ? bw_data_p->bw_val : SDP_INVALID_VALUE;
+}
+
+/*
+ * sdp_get_num_bw_lines
+ *
+ * Returns the number of bw lines present at a given level.
+ *
+ * Parameters: sdp_p    The SDP handle returned by sdp_init_description.
+ *             level    The level at which the count of bw lines is required
+ *
+ * Returns: A valid count or SDP_INVALID_VALUE
+ */
+int32_t sdp_get_num_bw_lines (sdp_t *sdp_p, uint16_t level)
+{
+    sdp_mca_t *mca_p;
+
+    if (level == SDP_SESSION_LEVEL) {
+        return (sdp_p->bw.bw_data_count);
+    }
+
+    mca_p = sdp_find_media_level(sdp_p, level);
+    if (mca_p == NULL) {
+        sdp_p->conf_p->num_invalid_param++;
+        return (SDP_INVALID_VALUE);
+    }
+    return (mca_p->bw.bw_data_count);
+}
+
+/*
+ * sdp_add_new_bw_line
+ *
+ * To specify bandwidth parameters at any level, a bw line must first be
+ * added at that level using this function; its properties are then set
+ * with sdp_set_bw().  Since multiple bw lines can exist at any level,
+ * each is identified by a 1-based instance number within its level, e.g.:
+ *      b=AS:60                          # session level, instance 1
+ *      b=TIAS:50780                     # session level, instance 2
+ *      m=audio 1234 RTP/AVP 0 101 102
+ *      b=AS:12                          # 1st media level, instance 1
+ *
+ * If a bw line with the requested modifier already exists at the level,
+ * no new line is added and its instance number is returned.  Otherwise a
+ * new line is appended; note it is created with modifier
+ * SDP_BW_MODIFIER_UNSUPPORTED and value 0 until sdp_set_bw() is called.
+ *
+ * Parameters:
+ * sdp_p        The SDP handle returned by sdp_init_description.
+ * level        The level to create the bw line.
+ * bw_modifier  The Type of bandwidth, CT, AS or TIAS.
+ * *inst_num    Set to the instance number of the bw line for this
+ *              modifier.
+ */
+sdp_result_e sdp_add_new_bw_line (sdp_t *sdp_p, uint16_t level, sdp_bw_modifier_e bw_modifier, uint16_t *inst_num)
+{
+    sdp_bw_t *bw_p;
+    sdp_mca_t *mca_p;
+    sdp_bw_data_t *new_bw_data_p;
+    sdp_bw_data_t **link_pp;
+
+    *inst_num = 0;
+
+    /* Select the session-level or media-level bw list. */
+    if (level == SDP_SESSION_LEVEL) {
+        bw_p = &(sdp_p->bw);
+    } else {
+        mca_p = sdp_find_media_level(sdp_p, level);
+        if (mca_p == NULL) {
+            sdp_p->conf_p->num_invalid_param++;
+            return (SDP_INVALID_PARAMETER);
+        }
+        bw_p = &(mca_p->bw);
+    }
+
+    /* Walk to the tail of the list; if a line with this modifier already
+     * exists, report its instance number and stop. */
+    link_pp = &bw_p->bw_data_list;
+    while (*link_pp != NULL) {
+        ++(*inst_num);
+        if ((*link_pp)->bw_modifier == bw_modifier) {
+            return (SDP_SUCCESS);
+        }
+        link_pp = &(*link_pp)->next_p;
+    }
+
+    /* Allocate the new line; its properties stay unset until sdp_set_bw()
+     * is called on it. */
+    new_bw_data_p = (sdp_bw_data_t*)SDP_MALLOC(sizeof(sdp_bw_data_t));
+    if (new_bw_data_p == NULL) {
+        sdp_p->conf_p->num_no_resource++;
+        return (SDP_NO_RESOURCE);
+    }
+    new_bw_data_p->next_p = NULL;
+    new_bw_data_p->bw_modifier = SDP_BW_MODIFIER_UNSUPPORTED;
+    new_bw_data_p->bw_val = 0;
+
+    /* Append at the tail and report the new instance number. */
+    *link_pp = new_bw_data_p;
+    *inst_num = ++bw_p->bw_data_count;
+
+    return (SDP_SUCCESS);
+}
+
+/* Function:    sdp_get_mid_value
+ * Description: Returns the mid value parameter from the a= mid: line,
+ *              or SDP_INVALID_VALUE if the media level does not exist.
+ * Parameters:  sdp_p     The SDP handle returned by sdp_init_description.
+ *              level     SDP_MEDIA_LEVEL
+ * Returns:     mid value.
+ */
+int32_t sdp_get_mid_value (sdp_t *sdp_p, uint16_t level)
+{
+    sdp_mca_t *mca_p = sdp_find_media_level(sdp_p, level);
+
+    if (mca_p == NULL) {
+        sdp_p->conf_p->num_invalid_param++;
+        return (SDP_INVALID_VALUE);
+    }
+    return (mca_p->mid);
+}
+
diff --git a/media/webrtc/signaling/src/sdp/sipcc/sdp_attr.c b/media/webrtc/signaling/src/sdp/sipcc/sdp_attr.c
new file mode 100644
index 000000000..e3afa2637
--- /dev/null
+++ b/media/webrtc/signaling/src/sdp/sipcc/sdp_attr.c
@@ -0,0 +1,5120 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <errno.h>
+#include <limits.h>
+#include <stdio.h>
+
+#include "plstr.h"
+#include "sdp_os_defs.h"
+#include "sdp.h"
+#include "sdp_private.h"
+#include "sdp_base64.h"
+
+#include "CSFLog.h"
+
+static const char* logTag = "sdp_attr";
+
+/*
+ * Macro for sdp_build_attr_fmtp
+ * Adds name-value pair where value is char*
+ */
+#define FMTP_BUILD_STRING(condition, name, value) \
+ if ((condition)) { \
+ sdp_append_name_and_string(fs, (name), (value), semicolon); \
+ semicolon = TRUE; \
+ }
+
+/*
+ * Macro for sdp_build_attr_fmtp
+ * Adds name-value pair where value is unsigned
+ */
+#define FMTP_BUILD_UNSIGNED(condition, name, value) \
+ if ((condition)) { \
+ sdp_append_name_and_unsigned(fs, (name), (value), semicolon); \
+ semicolon = TRUE; \
+ }
+
+/*
+ * Macro for sdp_build_attr_fmtp
+ * Adds flag string on condition
+ */
+#define FMTP_BUILD_FLAG(condition, name) \
+ if ((condition)) { \
+ if (semicolon) { \
+ flex_string_append(fs, ";"); \
+ } \
+ flex_string_append(fs, name); \
+ semicolon = TRUE; \
+ }
+
+static int find_token_enum(const char *attr_name,
+ sdp_t *sdp_p,
+ const char **ptr,
+ const sdp_namearray_t *types,
+ int type_count,
+ int unknown_value)
+{
+ sdp_result_e result = SDP_SUCCESS;
+ char tmp[SDP_MAX_STRING_LEN+1];
+ int i;
+
+ *ptr = sdp_getnextstrtok(*ptr, tmp, sizeof(tmp), " \t", &result);
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: problem parsing %s", sdp_p->debug_str, attr_name);
+ sdp_p->conf_p->num_invalid_param++;
+ return -1;
+ }
+
+ for (i=0; i < type_count; i++) {
+ if (!cpr_strncasecmp(tmp, types[i].name, types[i].strlen)) {
+ return i;
+ }
+ }
+ return unknown_value;
+}
+
+/*
+ * Helper function for adding nv-pair where value is string.
+ */
+static void sdp_append_name_and_string(flex_string *fs,
+ const char *name,
+ const char *value,
+ tinybool semicolon)
+{
+ flex_string_sprintf(fs, "%s%s=%s",
+ semicolon ? ";" : "",
+ name,
+ value);
+}
+
+/*
+ * Helper function for adding nv-pair where value is unsigned.
+ */
+static void sdp_append_name_and_unsigned(flex_string *fs,
+ const char *name,
+ unsigned int value,
+ tinybool semicolon)
+{
+ flex_string_sprintf(fs, "%s%s=%u",
+ semicolon ? ";" : "",
+ name,
+ value);
+}
+
+/* Function: sdp_parse_attribute
+ * Description: Figure out the type of attribute and call the appropriate
+ * parsing routine. If parsing errors are encountered,
+ * warnings will be printed and the attribute will be ignored.
+ * Unrecognized/invalid attributes do not cause overall parsing
+ * errors. All errors detected are noted as warnings.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * ptr Pointer to the attribute string to parse.
+ */
+sdp_result_e sdp_parse_attribute (sdp_t *sdp_p, uint16_t level, const char *ptr)
+{
+ int i;
+ uint8_t xcpar_flag = FALSE;
+ sdp_result_e result;
+ sdp_mca_t *mca_p=NULL;
+ sdp_attr_t *attr_p;
+ sdp_attr_t *next_attr_p;
+ sdp_attr_t *prev_attr_p = NULL;
+ char tmp[SDP_MAX_STRING_LEN];
+
+ /* Validate the level */
+ if (level != SDP_SESSION_LEVEL) {
+ mca_p = sdp_find_media_level(sdp_p, level);
+ if (mca_p == NULL) {
+ return (SDP_FAILURE);
+ }
+ }
+
+ /* Find the attribute type. */
+ ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), ": \t", &result);
+ if (ptr == NULL) {
+ sdp_parse_error(sdp_p,
+ "%s No attribute type specified, parse failed.", sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+ if (ptr[0] == ':') {
+ /* Skip the ':' char for parsing attribute parameters. */
+ ptr++;
+ }
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s No attribute type specified, parse failed.", sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ attr_p = (sdp_attr_t *)SDP_MALLOC(sizeof(sdp_attr_t));
+ if (attr_p == NULL) {
+ sdp_p->conf_p->num_no_resource++;
+ return (SDP_NO_RESOURCE);
+ }
+ attr_p->line_number = sdp_p->parse_line;
+ attr_p->type = SDP_ATTR_INVALID;
+ attr_p->next_p = NULL;
+ for (i=0; i < SDP_MAX_ATTR_TYPES; i++) {
+ if (cpr_strncasecmp(tmp, sdp_attr[i].name, sdp_attr[i].strlen) == 0) {
+ attr_p->type = (sdp_attr_e)i;
+ break;
+ }
+ }
+ if (attr_p->type == SDP_ATTR_INVALID) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: Unrecognized attribute (%s) ",
+ sdp_p->debug_str, tmp);
+ sdp_free_attr(attr_p);
+ return (SDP_SUCCESS);
+ }
+
+ /* If this is an X-cpar or cpar attribute, set the flag. The attribute
+ * type will be changed by the parse. */
+ if ((attr_p->type == SDP_ATTR_X_CPAR) ||
+ (attr_p->type == SDP_ATTR_CPAR)) {
+ xcpar_flag = TRUE;
+ }
+
+ /* Parse the attribute. */
+ result = sdp_attr[attr_p->type].parse_func(sdp_p, attr_p, ptr);
+ if (result != SDP_SUCCESS) {
+ sdp_free_attr(attr_p);
+ /* Return success so the parse won't fail. We don't want to
+ * fail on errors with attributes but just ignore them.
+ */
+ return (SDP_SUCCESS);
+ }
+
+ /* If this was an X-cpar/cpar attribute, it was hooked into the X-cap/cdsc
+ * structure, so we're finished.
+ */
+ if (xcpar_flag == TRUE) {
+ return (result);
+ }
+
+ /* Add the attribute in the appropriate place. */
+ if (level == SDP_SESSION_LEVEL) {
+ for (next_attr_p = sdp_p->sess_attrs_p; next_attr_p != NULL;
+ prev_attr_p = next_attr_p,
+ next_attr_p = next_attr_p->next_p) {
+ ; /* Empty for */
+ }
+ if (prev_attr_p == NULL) {
+ sdp_p->sess_attrs_p = attr_p;
+ } else {
+ prev_attr_p->next_p = attr_p;
+ }
+ } else {
+ for (next_attr_p = mca_p->media_attrs_p; next_attr_p != NULL;
+ prev_attr_p = next_attr_p,
+ next_attr_p = next_attr_p->next_p) {
+ ; /* Empty for */
+ }
+ if (prev_attr_p == NULL) {
+ mca_p->media_attrs_p = attr_p;
+ } else {
+ prev_attr_p->next_p = attr_p;
+ }
+ }
+
+ return (result);
+}
+
+/* Build all of the attributes defined for the specified level. */
+sdp_result_e sdp_build_attribute (sdp_t *sdp_p, uint16_t level, flex_string *fs)
+{
+ sdp_attr_t *attr_p;
+ sdp_mca_t *mca_p=NULL;
+ sdp_result_e result;
+
+ if (level == SDP_SESSION_LEVEL) {
+ attr_p = sdp_p->sess_attrs_p;
+ } else {
+ mca_p = sdp_find_media_level(sdp_p, level);
+ if (mca_p == NULL) {
+ return (SDP_FAILURE);
+ }
+ attr_p = mca_p->media_attrs_p;
+ }
+ /* Re-initialize the current capability number for this new level. */
+ sdp_p->cur_cap_num = 1;
+
+ /* Build all of the attributes for this level. Note that if there
+ * is a problem building an attribute, we don't fail but just ignore it.*/
+ while (attr_p != NULL) {
+ if (attr_p->type >= SDP_MAX_ATTR_TYPES) {
+ if (sdp_p->debug_flag[SDP_DEBUG_WARNINGS]) {
+ CSFLogDebug(logTag, "%s Invalid attribute type to build (%u)",
+ sdp_p->debug_str, (unsigned)attr_p->type);
+ }
+ } else {
+ result = sdp_attr[attr_p->type].build_func(sdp_p, attr_p, fs);
+
+ if (result != SDP_SUCCESS) {
+ CSFLogError(logTag, "%s error building attribute %d", __FUNCTION__, result);
+ return result;
+ }
+
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Built a=%s attribute line", sdp_p->debug_str,
+ sdp_get_attr_name(attr_p->type));
+ }
+ }
+ attr_p = attr_p->next_p;
+ }
+
+ return SDP_SUCCESS;
+}
+
+sdp_result_e sdp_parse_attr_simple_string (sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr)
+{
+ sdp_result_e result;
+
+ ptr = sdp_getnextstrtok(ptr, attr_p->attr.string_val,
+ sizeof(attr_p->attr.string_val), " \t", &result);
+
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: No string token found for %s attribute",
+ sdp_p->debug_str, sdp_get_attr_name(attr_p->type));
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ } else {
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Parsed a=%s, %s", sdp_p->debug_str,
+ sdp_get_attr_name(attr_p->type),
+ attr_p->attr.string_val);
+ }
+ return (SDP_SUCCESS);
+ }
+}
+
+sdp_result_e sdp_build_attr_simple_string (sdp_t *sdp_p, sdp_attr_t *attr_p,
+ flex_string *fs)
+{
+ flex_string_sprintf(fs, "a=%s:%s\r\n", sdp_attr[attr_p->type].name,
+ attr_p->attr.string_val);
+
+ return SDP_SUCCESS;
+}
+
+sdp_result_e sdp_parse_attr_simple_u32 (sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr)
+{
+ sdp_result_e result;
+
+ attr_p->attr.u32_val = sdp_getnextnumtok(ptr, &ptr, " \t", &result);
+
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: Numeric token for %s attribute not found",
+ sdp_p->debug_str, sdp_get_attr_name(attr_p->type));
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ } else {
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Parsed a=%s, %u", sdp_p->debug_str,
+ sdp_get_attr_name(attr_p->type), attr_p->attr.u32_val);
+ }
+ return (SDP_SUCCESS);
+ }
+}
+
+sdp_result_e sdp_build_attr_simple_u32 (sdp_t *sdp_p, sdp_attr_t *attr_p,
+ flex_string *fs)
+{
+ flex_string_sprintf(fs, "a=%s:%u\r\n", sdp_attr[attr_p->type].name,
+ attr_p->attr.u32_val);
+
+ return SDP_SUCCESS;
+}
+
+sdp_result_e sdp_parse_attr_simple_bool (sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr)
+{
+ sdp_result_e result;
+
+ if (sdp_getnextnumtok(ptr, &ptr, " \t", &result) == 0) {
+ attr_p->attr.boolean_val = FALSE;
+ } else {
+ attr_p->attr.boolean_val= TRUE;
+ }
+
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: Boolean token for %s attribute not found",
+ sdp_p->debug_str, sdp_get_attr_name(attr_p->type));
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ } else {
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ if (attr_p->attr.boolean_val) {
+ SDP_PRINT("%s Parsed a=%s, boolean is TRUE", sdp_p->debug_str,
+ sdp_get_attr_name(attr_p->type));
+ } else {
+ SDP_PRINT("%s Parsed a=%s, boolean is FALSE", sdp_p->debug_str,
+ sdp_get_attr_name(attr_p->type));
+ }
+ }
+ return (SDP_SUCCESS);
+ }
+}
+
+sdp_result_e sdp_build_attr_simple_bool (sdp_t *sdp_p, sdp_attr_t *attr_p,
+ flex_string *fs)
+{
+ flex_string_sprintf(fs, "a=%s:%s\r\n", sdp_attr[attr_p->type].name,
+ attr_p->attr.boolean_val ? "1" : "0");
+
+ return SDP_SUCCESS;
+}
+
+/*
+ * sdp_parse_attr_maxprate
+ *
+ * This function parses maxprate attribute lines. The ABNF for this a=
+ * line is:
+ * max-p-rate-def = "a" "=" "maxprate" ":" packet-rate CRLF
+ * packet-rate = 1*DIGIT ["." 1*DIGIT]
+ *
+ * Returns:
+ * SDP_INVALID_PARAMETER - If we are unable to parse the string OR if
+ * packet-rate is not in the right format as per
+ * the ABNF.
+ *
+ * SDP_SUCCESS - If we are able to successfully parse the a= line.
+ */
+sdp_result_e sdp_parse_attr_maxprate (sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr)
+{
+ sdp_result_e result;
+
+ ptr = sdp_getnextstrtok(ptr, attr_p->attr.string_val,
+ sizeof(attr_p->attr.string_val), " \t", &result);
+
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: No string token found for %s attribute",
+ sdp_p->debug_str, sdp_get_attr_name(attr_p->type));
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ } else {
+ if (!sdp_validate_maxprate(attr_p->attr.string_val)) {
+ sdp_parse_error(sdp_p,
+ "%s is not a valid maxprate value.",
+ attr_p->attr.string_val);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Parsed a=%s, %s", sdp_p->debug_str,
+ sdp_get_attr_name(attr_p->type),
+ attr_p->attr.string_val);
+ }
+ return (SDP_SUCCESS);
+ }
+}
+
+/*
+ * sdp_attr_fmtp_no_value
+ * Helper function for sending the warning when a parameter value is
+ * missing.
+ *
+ */
+static void sdp_attr_fmtp_no_value(sdp_t *sdp, const char *param_name)
+{
+ sdp_parse_error(sdp,
+ "%s Warning: No %s value specified for fmtp attribute",
+ sdp->debug_str, param_name);
+ sdp->conf_p->num_invalid_param++;
+}
+
+/*
+ * sdp_attr_fmtp_invalid_value
+ * Helper function for sending the warning when a parameter value is
+ * incorrect.
+ *
+ */
+static void sdp_attr_fmtp_invalid_value(sdp_t *sdp, const char *param_name,
+ const char* param_value)
+{
+ sdp_parse_error(sdp,
+ "%s Warning: Invalid %s: %s specified for fmtp attribute",
+ sdp->debug_str, param_name, param_value);
+ sdp->conf_p->num_invalid_param++;
+}
+
+/*
+ * sdp_verify_attr_fmtp_telephone_event
+ * Helper function for verifying the telephone-event fmtp format
+ */
+static sdp_result_e sdp_verify_attr_fmtp_telephone_event(char *fmtpVal)
+{
+ size_t len = PL_strlen(fmtpVal);
+
+ // make sure the basics are good:
+ // - at least 1 character
+ // - no illegal chars
+ // - first char is a number
+ if (len < 1
+ || strspn(fmtpVal, "0123456789,-") != len
+ || PL_strstr(fmtpVal, ",,")
+ || fmtpVal[len-1] == ','
+ || !('0' <= fmtpVal[0] && fmtpVal[0] <= '9')) {
+ return SDP_INVALID_PARAMETER;
+ }
+
+ // Now that we've passed the basic sanity test, copy the string so we
+ // can tokenize and check the format of the tokens without disturbing
+ // the input string.
+ char dtmf_tones[SDP_MAX_STRING_LEN+1];
+ PL_strncpyz(dtmf_tones, fmtpVal, sizeof(dtmf_tones));
+
+ char *strtok_state;
+ char *temp = PL_strtok_r(dtmf_tones, ",", &strtok_state);
+
+ while (temp != NULL) {
+ len = PL_strlen(temp);
+ if (len > 5) {
+ // an example of a max size token is "11-15", so if the
+ // token is longer than 5 it is bad
+ return SDP_INVALID_PARAMETER;
+ }
+
+ // case where we have 1 or 2 characters, example 4 or 23
+ if (len < 3 && strspn(temp, "0123456789") != len) {
+ return SDP_INVALID_PARAMETER;
+ } else if (len >= 3) {
+ // case where we have 3-5 characters, ex 3-5, 2-33, or 10-20
+ sdp_result_e result1 = SDP_SUCCESS;
+ sdp_result_e result2 = SDP_SUCCESS;
+ uint8_t low_val;
+ uint8_t high_val;
+ low_val = (uint8_t)sdp_getnextnumtok(temp, (const char **)&temp,
+ "-", &result1);
+ high_val = (uint8_t)sdp_getnextnumtok(temp, (const char **)&temp,
+ "-", &result2);
+ if (temp[0] // we don't want to find a second hyphen
+ || result1 != SDP_SUCCESS
+ || result2 != SDP_SUCCESS) {
+ return SDP_INVALID_PARAMETER;
+ }
+
+ if (low_val > 99
+ || high_val > 99
+ || high_val <= low_val) {
+ return SDP_INVALID_PARAMETER;
+ }
+ }
+
+ temp=PL_strtok_r(NULL, ",", &strtok_state);
+ }
+
+ return SDP_SUCCESS;
+}
+
+/* Note: The fmtp attribute formats currently handled are:
+ * fmtp:<payload type> <event>,<event>...
+ * fmtp:<payload_type> [annexa=yes/no] [annexb=yes/no] [bitrate=<value>]
+ * [QCIF =<value>] [CIF =<value>] [MaxBR = <value>] one or more
+ * Other FMTP params as per H.263, H.263+, H.264 codec support.
+ * Note -"value" is a numeric value > 0 and each event is a
+ * single number or a range separated by a '-'.
+ * Example: fmtp:101 1,3-15,20
+ * Video codecs have annexes that can be listed in the following legal formats:
+ * a) a=fmtp:34 param1=token;D;I;J;K=1;N=2;P=1,3
+ * b) a=fmtp:34 param1=token;D;I;J;K=1;N=2;P=1,3;T
+ * c) a=fmtp:34 param1=token;D;I;J
+ *
+ */
+sdp_result_e sdp_get_fmtp_tok(sdp_t *sdp_p,
+ const char** fmtp_ptr,
+ const char* fmtp_name,
+ char* buf,
+ size_t buf_size,
+ char** tok)
+{
+ sdp_result_e result1 = SDP_SUCCESS;
+
+ *fmtp_ptr = sdp_getnextstrtok(*fmtp_ptr, buf, buf_size, "; \t", &result1);
+ if (result1 != SDP_SUCCESS) {
+ *fmtp_ptr = sdp_getnextstrtok(*fmtp_ptr, buf, buf_size, " \t", &result1);
+ if (result1 != SDP_SUCCESS) {
+ sdp_attr_fmtp_no_value(sdp_p, fmtp_name);
+ return SDP_INVALID_PARAMETER;
+ }
+ }
+ *tok = buf;
+ (*tok)++;
+
+ return SDP_SUCCESS;
+}
+
+sdp_result_e sdp_get_fmtp_tok_val(sdp_t *sdp_p,
+ const char** fmtp_ptr,
+ const char* fmtp_name,
+ char* buf,
+ size_t buf_size,
+ char** tok,
+ unsigned long* strtoul_result,
+ unsigned long illegal_value,
+ unsigned long min_limit,
+ unsigned long max_limit)
+{
+ sdp_result_e result1 = SDP_SUCCESS;
+ unsigned long value;
+ char* strtoul_end;
+
+ result1 = sdp_get_fmtp_tok(sdp_p, fmtp_ptr, fmtp_name, buf, buf_size, tok);
+ if (result1 != SDP_SUCCESS) return result1;
+
+ errno = 0;
+ value = strtoul(*tok, &strtoul_end, 10);
+
+ if (errno
+ || (*tok == strtoul_end)
+ || (illegal_value != -1UL && value == illegal_value)
+ || (min_limit != -1UL && value < min_limit)
+ || (max_limit != -1UL && value > max_limit)) {
+ sdp_attr_fmtp_invalid_value(sdp_p, fmtp_name, *tok);
+ return SDP_INVALID_PARAMETER;
+ }
+ *strtoul_result = value;
+
+ return SDP_SUCCESS;
+}
+
+
+sdp_result_e sdp_parse_attr_fmtp (sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr)
+{
+ uint16_t i;
+ uint32_t mapword;
+ uint32_t bmap;
+ uint8_t low_val;
+ uint8_t high_val;
+ const char *ptr2;
+ const char *fmtp_ptr;
+ sdp_result_e result1 = SDP_SUCCESS;
+ sdp_result_e result2 = SDP_SUCCESS;
+ tinybool done = FALSE;
+ tinybool codec_info_found = FALSE;
+ sdp_fmtp_t *fmtp_p;
+ char tmp[SDP_MAX_STRING_LEN];
+ char *src_ptr;
+ char *temp_ptr = NULL;
+ char *tok=NULL;
+ char *temp=NULL;
+ uint16_t custom_x=0;
+ uint16_t custom_y=0;
+ uint16_t custom_mpi=0;
+ uint16_t par_height=0;
+ uint16_t par_width=0;
+ uint16_t cpcf=0;
+ uint16_t iter=0;
+
+ ulong l_val = 0;
+ char* strtok_state;
+ unsigned long strtoul_result;
+ char* strtoul_end;
+
+ /* Find the payload type number. */
+ attr_p->attr.fmtp.payload_num = (uint16_t)sdp_getnextnumtok(ptr, &ptr,
+ " \t", &result1);
+ if (result1 != SDP_SUCCESS) {
+ sdp_attr_fmtp_no_value(sdp_p, "payload type");
+ return SDP_INVALID_PARAMETER;
+ }
+ fmtp_p = &(attr_p->attr.fmtp);
+ fmtp_p->fmtp_format = SDP_FMTP_UNKNOWN_TYPE;
+ fmtp_p->parameter_add = 1;
+ fmtp_p->flag = 0;
+
+ /*
+ * set default value of packetization mode and level-asymmetry-allowed. If
+ * remote sdp does not specify any value for these two parameters, then the
+ * default value will be assumed for remote sdp. If remote sdp does specify
+ * any value for these parameters, then default value will be overridden.
+ */
+ fmtp_p->packetization_mode = SDP_DEFAULT_PACKETIZATION_MODE_VALUE;
+ fmtp_p->level_asymmetry_allowed = SDP_DEFAULT_LEVEL_ASYMMETRY_ALLOWED_VALUE;
+
+ temp_ptr = cpr_strdup(ptr);
+ if (temp_ptr == NULL) {
+ return (SDP_FAILURE);
+ }
+ fmtp_ptr = src_ptr = temp_ptr;
+
+ src_ptr = temp_ptr;
+ while (!done) {
+ fmtp_ptr = sdp_getnextstrtok(fmtp_ptr, tmp, sizeof(tmp), "= \t", &result1);
+ if (result1 == SDP_SUCCESS) {
+ if (cpr_strncasecmp(tmp, sdp_fmtp_codec_param[1].name,
+ sdp_fmtp_codec_param[1].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok(sdp_p, &fmtp_ptr, "annexb", tmp, sizeof(tmp), &tok);
+ if (result1 != SDP_SUCCESS) { SDP_FREE(temp_ptr); return result1; }
+
+ if (cpr_strncasecmp(tok,sdp_fmtp_codec_param_val[0].name,
+ sdp_fmtp_codec_param_val[0].strlen) == 0) {
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->annexb_required = TRUE;
+ fmtp_p->annexb = TRUE;
+ } else if (cpr_strncasecmp(tok,sdp_fmtp_codec_param_val[1].name,
+ sdp_fmtp_codec_param_val[1].strlen) == 0) {
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->annexb_required = TRUE;
+ fmtp_p->annexb = FALSE;
+ } else {
+ sdp_attr_fmtp_invalid_value(sdp_p, "annexb", tok);
+ SDP_FREE(temp_ptr);
+ return SDP_INVALID_PARAMETER;
+ }
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp, sdp_fmtp_codec_param[0].name,
+ sdp_fmtp_codec_param[0].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok(sdp_p, &fmtp_ptr, "annexa", tmp, sizeof(tmp), &tok);
+ if (result1 != SDP_SUCCESS) { SDP_FREE(temp_ptr); return result1; }
+
+ if (cpr_strncasecmp(tok,sdp_fmtp_codec_param_val[0].name,
+ sdp_fmtp_codec_param_val[0].strlen) == 0) {
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->annexa = TRUE;
+ fmtp_p->annexa_required = TRUE;
+ } else if (cpr_strncasecmp(tok,sdp_fmtp_codec_param_val[1].name,
+ sdp_fmtp_codec_param_val[1].strlen) == 0) {
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->annexa = FALSE;
+ fmtp_p->annexa_required = TRUE;
+ } else {
+ sdp_attr_fmtp_invalid_value(sdp_p, "annexa", tok);
+ SDP_FREE(temp_ptr);
+ return SDP_INVALID_PARAMETER;
+ }
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[2].name,
+ sdp_fmtp_codec_param[2].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok_val(sdp_p, &fmtp_ptr, "bitrate", tmp, sizeof(tmp),
+ &tok, &strtoul_result, 0, -1, UINT_MAX);
+ if (result1 != SDP_SUCCESS) { SDP_FREE(temp_ptr); return result1; }
+
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->bitrate = (uint32_t) strtoul_result;
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[41].name,
+ sdp_fmtp_codec_param[41].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok_val(sdp_p, &fmtp_ptr, "mode", tmp, sizeof(tmp),
+ &tok, &strtoul_result, -1, -1, UINT_MAX);
+ if (result1 != SDP_SUCCESS) { SDP_FREE(temp_ptr); return result1; }
+
+ fmtp_p->fmtp_format = SDP_FMTP_MODE;
+ fmtp_p->mode = (uint32_t) strtoul_result;
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[3].name,
+ sdp_fmtp_codec_param[3].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok_val(sdp_p, &fmtp_ptr, "qcif", tmp, sizeof(tmp),
+ &tok, &strtoul_result, -1, SDP_MIN_CIF_VALUE, SDP_MAX_CIF_VALUE);
+ if (result1 != SDP_SUCCESS) { SDP_FREE(temp_ptr); return result1; }
+
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->qcif = (uint16_t) strtoul_result;
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[4].name,
+ sdp_fmtp_codec_param[4].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok_val(sdp_p, &fmtp_ptr, "cif", tmp, sizeof(tmp),
+ &tok, &strtoul_result, -1, SDP_MIN_CIF_VALUE, SDP_MAX_CIF_VALUE);
+ if (result1 != SDP_SUCCESS) { SDP_FREE(temp_ptr); return result1; }
+
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->cif = (uint16_t) strtoul_result;
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[5].name,
+ sdp_fmtp_codec_param[5].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok_val(sdp_p, &fmtp_ptr, "maxbr", tmp, sizeof(tmp),
+ &tok, &strtoul_result, 0, -1, USHRT_MAX);
+ if (result1 != SDP_SUCCESS) { SDP_FREE(temp_ptr); return result1; }
+
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->maxbr = (uint16_t) strtoul_result;
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[6].name,
+ sdp_fmtp_codec_param[6].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok_val(sdp_p, &fmtp_ptr, "sqcif", tmp, sizeof(tmp),
+ &tok, &strtoul_result, -1, SDP_MIN_CIF_VALUE, SDP_MAX_CIF_VALUE);
+ if (result1 != SDP_SUCCESS) { SDP_FREE(temp_ptr); return result1; }
+
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->sqcif = (uint16_t) strtoul_result;
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[7].name,
+ sdp_fmtp_codec_param[7].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok_val(sdp_p, &fmtp_ptr, "cif4", tmp, sizeof(tmp),
+ &tok, &strtoul_result, -1, SDP_MIN_CIF_VALUE, SDP_MAX_CIF_VALUE);
+ if (result1 != SDP_SUCCESS) { SDP_FREE(temp_ptr); return result1; }
+
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->cif4 = (uint16_t) strtoul_result;
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[8].name,
+ sdp_fmtp_codec_param[8].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok_val(sdp_p, &fmtp_ptr, "cif16", tmp, sizeof(tmp),
+ &tok, &strtoul_result, -1, SDP_MIN_CIF_VALUE, SDP_MAX_CIF_VALUE);
+ if (result1 != SDP_SUCCESS) { SDP_FREE(temp_ptr); return result1; }
+
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->cif16 = (uint16_t) strtoul_result;
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[9].name,
+ sdp_fmtp_codec_param[9].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok(sdp_p, &fmtp_ptr, "custom", tmp, sizeof(tmp), &tok);
+ if (result1 != SDP_SUCCESS) { SDP_FREE(temp_ptr); return result1; }
+
+ temp=PL_strtok_r(tok, ",", &strtok_state);
+ iter++;
+ if (temp) {
+ iter=1;
+ while (temp != NULL) {
+ errno = 0;
+ strtoul_result = strtoul(temp, &strtoul_end, 10);
+
+ if (errno || temp == strtoul_end || strtoul_result > USHRT_MAX){
+ custom_x = custom_y = custom_mpi = 0;
+ break;
+ }
+
+ if (iter == 1)
+ custom_x = (uint16_t) strtoul_result;
+ if (iter == 2)
+ custom_y = (uint16_t) strtoul_result;
+ if (iter == 3)
+ custom_mpi = (uint16_t) strtoul_result;
+
+ temp=PL_strtok_r(NULL, ",", &strtok_state);
+ iter++;
+ }
+ }
+
+ /* custom x,y and mpi values from tmp */
+ if (!custom_x || !custom_y || !custom_mpi) {
+ sdp_attr_fmtp_invalid_value(sdp_p, "x/y/MPI", temp);
+ SDP_FREE(temp_ptr);
+ return SDP_INVALID_PARAMETER;
+ }
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->custom_x = custom_x;
+ fmtp_p->custom_y = custom_y;
+ fmtp_p->custom_mpi = custom_mpi;
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[10].name,
+ sdp_fmtp_codec_param[10].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok(sdp_p, &fmtp_ptr, "par", tmp, sizeof(tmp), &tok);
+ if (result1 != SDP_SUCCESS) { SDP_FREE(temp_ptr); return result1; }
+
+ temp=PL_strtok_r(tok, ":", &strtok_state);
+ if (temp) {
+ iter=1;
+ /* get par width and par height for the aspect ratio */
+ while (temp != NULL) {
+ errno = 0;
+ strtoul_result = strtoul(temp, &strtoul_end, 10);
+
+ if (errno || temp == strtoul_end || strtoul_result > USHRT_MAX) {
+ par_width = par_height = 0;
+ break;
+ }
+
+ if (iter == 1)
+ par_width = (uint16_t) strtoul_result;
+ else
+ par_height = (uint16_t) strtoul_result;
+
+ temp=PL_strtok_r(NULL, ",", &strtok_state);
+ iter++;
+ }
+ }
+ if (!par_width || !par_height) {
+ sdp_attr_fmtp_invalid_value(sdp_p, "par_width or par_height", temp);
+ SDP_FREE(temp_ptr);
+ return SDP_INVALID_PARAMETER;
+ }
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->par_width = par_width;
+ fmtp_p->par_height = par_height;
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[11].name,
+ sdp_fmtp_codec_param[11].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok(sdp_p, &fmtp_ptr, "cpcf", tmp, sizeof(tmp), &tok);
+ if (result1 != SDP_SUCCESS) { SDP_FREE(temp_ptr); return result1; }
+
+ temp=PL_strtok_r(tok, ".", &strtok_state);
+ if ( temp != NULL ) {
+ errno = 0;
+ strtoul_result = strtoul(temp, &strtoul_end, 10);
+
+ if (errno || temp == strtoul_end || strtoul_result > USHRT_MAX) {
+ cpcf = 0;
+ } else {
+ cpcf = (uint16_t) strtoul_result;
+ }
+ }
+
+ if (!cpcf) {
+ sdp_attr_fmtp_invalid_value(sdp_p, "cpcf", tok);
+ SDP_FREE(temp_ptr);
+ return SDP_INVALID_PARAMETER;
+ }
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->cpcf = cpcf;
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[12].name,
+ sdp_fmtp_codec_param[12].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok_val(sdp_p, &fmtp_ptr, "bpp", tmp, sizeof(tmp),
+ &tok, &strtoul_result, 0, -1, USHRT_MAX);
+ if (result1 != SDP_SUCCESS) { SDP_FREE(temp_ptr); return result1; }
+
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->bpp = (uint16_t) strtoul_result;
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[13].name,
+ sdp_fmtp_codec_param[13].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok_val(sdp_p, &fmtp_ptr, "hrd", tmp, sizeof(tmp),
+ &tok, &strtoul_result, 0, -1, USHRT_MAX);
+ if (result1 != SDP_SUCCESS) { SDP_FREE(temp_ptr); return result1; }
+
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->hrd = (uint16_t) strtoul_result;
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[14].name,
+ sdp_fmtp_codec_param[14].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok_val(sdp_p, &fmtp_ptr, "profile", tmp, sizeof(tmp),
+ &tok, &strtoul_result, -1, -1, SDP_MAX_PROFILE_VALUE);
+ if (result1 != SDP_SUCCESS) { SDP_FREE(temp_ptr); return result1; }
+
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->profile = (short) strtoul_result;
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[15].name,
+ sdp_fmtp_codec_param[15].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok_val(sdp_p, &fmtp_ptr, "level", tmp, sizeof(tmp),
+ &tok, &strtoul_result, -1, -1, SDP_MAX_LEVEL_VALUE);
+ if (result1 != SDP_SUCCESS) { SDP_FREE(temp_ptr); return result1; }
+
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->level = (short) strtoul_result;
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[16].name,
+ sdp_fmtp_codec_param[16].strlen) == 0) {
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->is_interlace = TRUE;
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[17].name,
+ sdp_fmtp_codec_param[17].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok(sdp_p, &fmtp_ptr, "profile_level_id", tmp, sizeof(tmp), &tok);
+ if (result1 != SDP_SUCCESS) { SDP_FREE(temp_ptr); return result1; }
+
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ sstrncpy(fmtp_p->profile_level_id , tok, sizeof(fmtp_p->profile_level_id));
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[18].name,
+ sdp_fmtp_codec_param[18].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok(sdp_p, &fmtp_ptr, "parameter_sets", tmp, sizeof(tmp), &tok);
+ if (result1 != SDP_SUCCESS) { SDP_FREE(temp_ptr); return result1; }
+
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ sstrncpy(fmtp_p->parameter_sets , tok, sizeof(fmtp_p->parameter_sets));
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[19].name,
+ sdp_fmtp_codec_param[19].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok_val(sdp_p, &fmtp_ptr, "packetization_mode", tmp, sizeof(tmp),
+ &tok, &strtoul_result, -1, -1, 2);
+ // this one is different for some reason. Most others don't increment
+ // the num_invalid_param field. (mjf)
+ if (result1 == SDP_INVALID_PARAMETER) { sdp_p->conf_p->num_invalid_param++; }
+ if (result1 != SDP_SUCCESS) { SDP_FREE(temp_ptr); return result1; }
+
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->packetization_mode = (int16_t) strtoul_result;
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[20].name,
+ sdp_fmtp_codec_param[20].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok_val(sdp_p, &fmtp_ptr, "interleaving_depth", tmp, sizeof(tmp),
+ &tok, &strtoul_result, 0, -1, USHRT_MAX);
+ if (result1 != SDP_SUCCESS) { SDP_FREE(temp_ptr); return result1; }
+
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->interleaving_depth = (uint16_t) strtoul_result;
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[21].name,
+ sdp_fmtp_codec_param[21].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok(sdp_p, &fmtp_ptr, "deint_buf", tmp, sizeof(tmp), &tok);
+ if (result1 != SDP_SUCCESS) { SDP_FREE(temp_ptr); return result1; }
+
+ if (sdp_checkrange(sdp_p, tok, &l_val) == TRUE) {
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->deint_buf_req = (uint32_t) l_val;
+ fmtp_p->flag |= SDP_DEINT_BUF_REQ_FLAG;
+ codec_info_found = TRUE;
+ } else {
+ sdp_attr_fmtp_invalid_value(sdp_p, "deint_buf_req", tok);
+ SDP_FREE(temp_ptr);
+ return SDP_INVALID_PARAMETER;
+ }
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[22].name,
+ sdp_fmtp_codec_param[22].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok_val(sdp_p, &fmtp_ptr, "max_don_diff", tmp, sizeof(tmp),
+ &tok, &strtoul_result, 0, -1, UINT_MAX);
+ if (result1 != SDP_SUCCESS) { SDP_FREE(temp_ptr); return result1; }
+
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->max_don_diff = (uint32_t) strtoul_result;
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[23].name,
+ sdp_fmtp_codec_param[23].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok(sdp_p, &fmtp_ptr, "init_buf_time", tmp, sizeof(tmp), &tok);
+ if (result1 != SDP_SUCCESS) { SDP_FREE(temp_ptr); return result1; }
+
+ if (sdp_checkrange(sdp_p, tok, &l_val) == TRUE) {
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->init_buf_time = (uint32_t) l_val;
+ fmtp_p->flag |= SDP_INIT_BUF_TIME_FLAG;
+ codec_info_found = TRUE;
+ } else {
+ sdp_attr_fmtp_invalid_value(sdp_p, "init_buf_time", tok);
+ SDP_FREE(temp_ptr);
+ return SDP_INVALID_PARAMETER;
+ }
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[24].name,
+ sdp_fmtp_codec_param[24].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok_val(sdp_p, &fmtp_ptr, "max_mbps", tmp, sizeof(tmp),
+ &tok, &strtoul_result, 0, -1, UINT_MAX);
+ if (result1 != SDP_SUCCESS) { SDP_FREE(temp_ptr); return result1; }
+
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->max_mbps = (uint32_t) strtoul_result;
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[25].name,
+ sdp_fmtp_codec_param[25].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok_val(sdp_p, &fmtp_ptr, "max-fs", tmp, sizeof(tmp),
+ &tok, &strtoul_result, 0, -1, UINT_MAX);
+ if (result1 != SDP_SUCCESS) { SDP_FREE(temp_ptr); return result1; }
+
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->max_fs = (uint32_t) strtoul_result;
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[26].name,
+ sdp_fmtp_codec_param[26].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok_val(sdp_p, &fmtp_ptr, "max_cbp", tmp, sizeof(tmp),
+ &tok, &strtoul_result, 0, -1, UINT_MAX);
+ if (result1 != SDP_SUCCESS) { SDP_FREE(temp_ptr); return result1; }
+
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->max_cpb = (uint32_t) strtoul_result;
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[27].name,
+ sdp_fmtp_codec_param[27].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok_val(sdp_p, &fmtp_ptr, "max_dpb", tmp, sizeof(tmp),
+ &tok, &strtoul_result, 0, -1, UINT_MAX);
+ if (result1 != SDP_SUCCESS) { SDP_FREE(temp_ptr); return result1; }
+
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->max_dpb = (uint32_t) strtoul_result;
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[28].name,
+ sdp_fmtp_codec_param[28].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok_val(sdp_p, &fmtp_ptr, "max_br", tmp, sizeof(tmp),
+ &tok, &strtoul_result, 0, -1, UINT_MAX);
+ if (result1 != SDP_SUCCESS) { SDP_FREE(temp_ptr); return result1; }
+
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->max_br = (uint32_t) strtoul_result;
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[29].name,
+ sdp_fmtp_codec_param[29].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok_val(sdp_p, &fmtp_ptr, "redundant_pic_cap", tmp, sizeof(tmp),
+ &tok, &strtoul_result, 0, -1, 1);
+ fmtp_p->redundant_pic_cap = (result1 == SDP_SUCCESS);
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[30].name,
+ sdp_fmtp_codec_param[30].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok(sdp_p, &fmtp_ptr, "deint_buf_cap", tmp, sizeof(tmp), &tok);
+ if (result1 != SDP_SUCCESS) { SDP_FREE(temp_ptr); return result1; }
+
+ if (sdp_checkrange(sdp_p, tok, &l_val) == TRUE) {
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->deint_buf_cap = (uint32_t) l_val;
+ fmtp_p->flag |= SDP_DEINT_BUF_CAP_FLAG;
+ codec_info_found = TRUE;
+ } else {
+ sdp_attr_fmtp_invalid_value(sdp_p, "deint_buf_cap", tok);
+ SDP_FREE(temp_ptr);
+ return SDP_INVALID_PARAMETER;
+ }
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[31].name,
+ sdp_fmtp_codec_param[31].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok(sdp_p, &fmtp_ptr, "max_rcmd_nalu_size", tmp, sizeof(tmp), &tok);
+ if (result1 != SDP_SUCCESS) { SDP_FREE(temp_ptr); return result1; }
+
+ if (sdp_checkrange(sdp_p, tok, &l_val) == TRUE) {
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->max_rcmd_nalu_size = (uint32_t) l_val;
+ fmtp_p->flag |= SDP_MAX_RCMD_NALU_SIZE_FLAG;
+ codec_info_found = TRUE;
+ } else {
+ sdp_attr_fmtp_invalid_value(sdp_p, "max_rcmd_nalu_size", tok);
+ SDP_FREE(temp_ptr);
+ return SDP_INVALID_PARAMETER;
+ }
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[32].name,
+ sdp_fmtp_codec_param[32].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok_val(sdp_p, &fmtp_ptr, "parameter_add", tmp, sizeof(tmp),
+ &tok, &strtoul_result, 0, -1, 1);
+ if (result1 != SDP_SUCCESS) { SDP_FREE(temp_ptr); return result1; }
+
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->parameter_add = (uint16_t) strtoul_result;
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[33].name,
+ sdp_fmtp_codec_param[33].strlen) == 0) {
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->annex_d = TRUE;
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[34].name,
+ sdp_fmtp_codec_param[34].strlen) == 0) {
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->annex_f = TRUE;
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[35].name,
+ sdp_fmtp_codec_param[35].strlen) == 0) {
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->annex_i = TRUE;
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[36].name,
+ sdp_fmtp_codec_param[36].strlen) == 0) {
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->annex_j = TRUE;
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[37].name,
+ sdp_fmtp_codec_param[36].strlen) == 0) {
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->annex_t = TRUE;
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[38].name,
+ sdp_fmtp_codec_param[38].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok_val(sdp_p, &fmtp_ptr, "annex_k", tmp, sizeof(tmp),
+ &tok, &strtoul_result, 0, -1, USHRT_MAX);
+ if (result1 != SDP_SUCCESS) { SDP_FREE(temp_ptr); return result1; }
+
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->annex_k_val = (uint16_t) strtoul_result;
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[39].name,
+ sdp_fmtp_codec_param[39].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok_val(sdp_p, &fmtp_ptr, "annex_n", tmp, sizeof(tmp),
+ &tok, &strtoul_result, 0, -1, USHRT_MAX);
+ if (result1 != SDP_SUCCESS) { SDP_FREE(temp_ptr); return result1; }
+
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->annex_n_val = (uint16_t) strtoul_result;
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[40].name,
+ sdp_fmtp_codec_param[40].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok(sdp_p, &fmtp_ptr, "annex_p", tmp, sizeof(tmp), &tok);
+ if (result1 != SDP_SUCCESS) { SDP_FREE(temp_ptr); return result1; }
+
+ fmtp_p->annex_p_val_picture_resize = 0;
+ fmtp_p->annex_p_val_warp = 0;
+ temp = PL_strtok_r(tok, ",", &strtok_state);
+ if (temp) {
+ iter=1;
+ while (temp != NULL) {
+ errno = 0;
+ strtoul_result = strtoul(temp, &strtoul_end, 10);
+
+ if (errno || temp == strtoul_end || strtoul_result > USHRT_MAX) {
+ break;
+ }
+
+ if (iter == 1)
+ fmtp_p->annex_p_val_picture_resize = (uint16_t) strtoul_result;
+ else if (iter == 2)
+ fmtp_p->annex_p_val_warp = (uint16_t) strtoul_result;
+
+ temp = PL_strtok_r(NULL, ",", &strtok_state);
+ iter++;
+ }
+ } else {
+ SDP_FREE(temp_ptr);
+ return SDP_INVALID_PARAMETER;
+ }
+
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[42].name,
+ sdp_fmtp_codec_param[42].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok_val(sdp_p, &fmtp_ptr, "level_asymmetry_allowed", tmp, sizeof(tmp),
+ &tok, &strtoul_result, -1, -1, SDP_MAX_LEVEL_ASYMMETRY_ALLOWED_VALUE);
+ if (result1 != SDP_SUCCESS) { SDP_FREE(temp_ptr); return result1; }
+
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->level_asymmetry_allowed = (int) strtoul_result;
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[43].name,
+ sdp_fmtp_codec_param[43].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok_val(sdp_p, &fmtp_ptr, "maxaveragebitrate", tmp, sizeof(tmp),
+ &tok, &strtoul_result, 0, -1, UINT_MAX);
+ if (result1 != SDP_SUCCESS) { SDP_FREE(temp_ptr); return result1; }
+
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->maxaveragebitrate = (uint32_t) strtoul_result;
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[44].name,
+ sdp_fmtp_codec_param[44].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok_val(sdp_p, &fmtp_ptr, "usedtx", tmp, sizeof(tmp),
+ &tok, &strtoul_result, -1, -1, 1);
+ if (result1 != SDP_SUCCESS) { SDP_FREE(temp_ptr); return result1; }
+
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->usedtx = (uint16_t) strtoul_result;
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[45].name,
+ sdp_fmtp_codec_param[45].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok_val(sdp_p, &fmtp_ptr, "stereo", tmp, sizeof(tmp),
+ &tok, &strtoul_result, -1, -1, 1);
+ if (result1 != SDP_SUCCESS) { SDP_FREE(temp_ptr); return result1; }
+
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->stereo = (uint16_t) strtoul_result;
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[46].name,
+ sdp_fmtp_codec_param[46].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok_val(sdp_p, &fmtp_ptr, "useinbandfec", tmp, sizeof(tmp),
+ &tok, &strtoul_result, -1, -1, 1);
+ if (result1 != SDP_SUCCESS) { SDP_FREE(temp_ptr); return result1; }
+
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->useinbandfec = (uint16_t) strtoul_result;
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[47].name,
+ sdp_fmtp_codec_param[47].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok(sdp_p, &fmtp_ptr, "maxcodedaudiobandwidth", tmp, sizeof(tmp), &tok);
+ // this one is different for some reason. Most others don't increment
+ // the num_invalid_param field. (mjf)
+ if (result1 == SDP_INVALID_PARAMETER) { sdp_p->conf_p->num_invalid_param++; }
+ if (result1 != SDP_SUCCESS) { SDP_FREE(temp_ptr); return result1; }
+
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ sstrncpy(fmtp_p->maxcodedaudiobandwidth , tok, sizeof(fmtp_p->maxcodedaudiobandwidth));
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[48].name,
+ sdp_fmtp_codec_param[48].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok_val(sdp_p, &fmtp_ptr, "cbr", tmp, sizeof(tmp),
+ &tok, &strtoul_result, -1, -1, 1);
+ if (result1 != SDP_SUCCESS) { SDP_FREE(temp_ptr); return result1; }
+
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->cbr = (uint16_t) strtoul_result;
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[49].name,
+ sdp_fmtp_codec_param[49].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok_val(sdp_p, &fmtp_ptr, "max-fr", tmp, sizeof(tmp),
+ &tok, &strtoul_result, 0, -1, UINT_MAX);
+ if (result1 != SDP_SUCCESS) { SDP_FREE(temp_ptr); return result1; }
+
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->max_fr = (uint32_t) strtoul_result;
+ codec_info_found = TRUE;
+
+ } else if (cpr_strncasecmp(tmp,sdp_fmtp_codec_param[50].name,
+ sdp_fmtp_codec_param[50].strlen) == 0) {
+ result1 = sdp_get_fmtp_tok_val(sdp_p, &fmtp_ptr, "maxplaybackrate", tmp, sizeof(tmp),
+ &tok, &strtoul_result, 0, -1, UINT_MAX);
+ if (result1 != SDP_SUCCESS) { SDP_FREE(temp_ptr); return result1; }
+
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+ fmtp_p->maxplaybackrate = (uint32_t) strtoul_result;
+ codec_info_found = TRUE;
+
+ } else if (fmtp_ptr != NULL && *fmtp_ptr == '\n') {
+ temp=PL_strtok_r(tmp, ";", &strtok_state);
+ if (temp) {
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Annexes are possibly there for this fmtp %s tmp: %s line\n",
+ sdp_p->debug_str, fmtp_ptr, tmp);
+ }
+ while (temp != NULL) {
+ if (strchr(temp, 'D') !=NULL) {
+ attr_p->attr.fmtp.annex_d = TRUE;
+ }
+ if (strchr(temp, 'F') !=NULL) {
+ attr_p->attr.fmtp.annex_f = TRUE;
+ }
+ if (strchr(temp, 'I') !=NULL) {
+ attr_p->attr.fmtp.annex_i = TRUE;
+ }
+ if (strchr(temp, 'J') !=NULL) {
+ attr_p->attr.fmtp.annex_j = TRUE;
+ }
+ if (strchr(temp, 'T') !=NULL) {
+ attr_p->attr.fmtp.annex_t = TRUE;
+ }
+ temp=PL_strtok_r(NULL, ";", &strtok_state);
+ }
+ } /* if (temp) */
+ done = TRUE;
+ } else if (strchr(tmp, '/')) {
+ // XXX Note that because RFC 5109 so conveniently specified
+ // this fmtp with no param names, we hope that nothing else
+ // has a slash in the string because otherwise we won't know
+ // how to differentiate.
+ temp=PL_strtok_r(tmp, "/", &strtok_state);
+ if (temp) {
+ iter = 0;
+ while (temp != NULL) {
+ errno = 0;
+ strtoul_result = strtoul(temp, &strtoul_end, 10);
+
+ if (errno ||
+ temp == strtoul_end || strtoul_result > USHRT_MAX) {
+ temp = NULL;
+ continue;
+ }
+ fmtp_p->redundant_encodings[iter++] =
+ (uint8_t)strtoul_result;
+ temp=PL_strtok_r(NULL, "/", &strtok_state);
+ }
+ } /* if (temp) */
+ } else if (SDP_SUCCESS == sdp_verify_attr_fmtp_telephone_event(tmp)) {
+ // XXX Note that DTMF fmtp will fall into here:
+ // a=fmtp:101 0-15 (or 0-15,NN,NN etc)
+ sstrncpy(fmtp_p->dtmf_tones , tmp, sizeof(fmtp_p->dtmf_tones));
+ codec_info_found = TRUE;
+ } else {
+ // unknown parameter - eat chars until ';'
+ CSFLogDebug(logTag, "%s Unknown fmtp type (%s) - ignoring", __FUNCTION__,
+ tmp);
+ fmtp_ptr = sdp_getnextstrtok(fmtp_ptr, tmp, sizeof(tmp), "; \t",
+ &result1);
+ if (result1 != SDP_SUCCESS) {
+ fmtp_ptr = sdp_getnextstrtok(fmtp_ptr, tmp, sizeof(tmp), " \t", &result1);
+ if (result1 != SDP_SUCCESS) {
+ // hmmm, no ; or spaces or tabs; continue on
+ }
+ }
+ }
+ if (*fmtp_ptr == '\n') {
+ // reached end of line, stop parsing
+ done = TRUE;
+ } else {
+ fmtp_ptr++;
+ }
+ } else {
+ done = TRUE;
+ }
+ } /* while - done loop*/
+
+ if (codec_info_found) {
+
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Parsed a=%s, payload type %u, bitrate %u, mode %u QCIF = %u, CIF = %u, MAXBR= %u, SQCIF=%u, CIF4= %u, CIF16=%u, CUSTOM=%u,%u,%u , PAR=%u:%u,CPCF=%u, BPP=%u, HRD=%u \n",
+ sdp_p->debug_str,
+ sdp_get_attr_name(attr_p->type),
+ attr_p->attr.fmtp.payload_num,
+ attr_p->attr.fmtp.bitrate,
+ attr_p->attr.fmtp.mode,
+ attr_p->attr.fmtp.qcif,
+ attr_p->attr.fmtp.cif,
+ attr_p->attr.fmtp.maxbr,
+ attr_p->attr.fmtp.sqcif,
+ attr_p->attr.fmtp.cif4,
+ attr_p->attr.fmtp.cif16,
+ attr_p->attr.fmtp.custom_x,attr_p->attr.fmtp.custom_y,
+ attr_p->attr.fmtp.custom_mpi,
+ attr_p->attr.fmtp.par_width,
+ attr_p->attr.fmtp.par_height,
+ attr_p->attr.fmtp.cpcf,
+ attr_p->attr.fmtp.bpp,
+ attr_p->attr.fmtp.hrd
+ );
+ }
+
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Parsed a=%s, payload type %u,PROFILE=%u,LEVEL=%u, INTERLACE - %s",
+ sdp_p->debug_str,
+ sdp_get_attr_name(attr_p->type),
+ attr_p->attr.fmtp.payload_num,
+ attr_p->attr.fmtp.profile,
+ attr_p->attr.fmtp.level,
+ attr_p->attr.fmtp.is_interlace ? "YES":"NO");
+ }
+
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Parsed H.264 attributes: profile-level-id=%s, parameter-sets=%s, packetization-mode=%d level-asymmetry-allowed=%d interleaving-depth=%d deint-buf-req=%u max-don-diff=%u, init_buf-time=%u\n",
+ sdp_p->debug_str,
+ attr_p->attr.fmtp.profile_level_id,
+ attr_p->attr.fmtp.parameter_sets,
+ attr_p->attr.fmtp.packetization_mode,
+ attr_p->attr.fmtp.level_asymmetry_allowed,
+ attr_p->attr.fmtp.interleaving_depth,
+ attr_p->attr.fmtp.deint_buf_req,
+ attr_p->attr.fmtp.max_don_diff,
+ attr_p->attr.fmtp.init_buf_time
+ );
+ }
+
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("\n%s Parsed H.264 opt attributes: max-mbps=%u, max-fs=%u, max-cpb=%u max-dpb=%u max-br=%u redundant-pic-cap=%d, deint-buf-cap=%u, max-rcmd-nalu-size=%u , parameter-add=%d\n",
+ sdp_p->debug_str,
+ attr_p->attr.fmtp.max_mbps,
+ attr_p->attr.fmtp.max_fs,
+ attr_p->attr.fmtp.max_cpb,
+ attr_p->attr.fmtp.max_dpb,
+ attr_p->attr.fmtp.max_br,
+ attr_p->attr.fmtp.redundant_pic_cap,
+ attr_p->attr.fmtp.deint_buf_cap,
+ attr_p->attr.fmtp.max_rcmd_nalu_size,
+ attr_p->attr.fmtp.parameter_add);
+
+ }
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Parsed annexes are : D=%d F=%d I=%d J=%d T=%d, K=%d N=%d P=%d,%d\n",
+ sdp_p->debug_str,
+ attr_p->attr.fmtp.annex_d,
+ attr_p->attr.fmtp.annex_f, attr_p->attr.fmtp.annex_i,
+ attr_p->attr.fmtp.annex_j, attr_p->attr.fmtp.annex_t,
+ attr_p->attr.fmtp.annex_k_val,
+ attr_p->attr.fmtp.annex_n_val,
+ attr_p->attr.fmtp.annex_p_val_picture_resize,
+ attr_p->attr.fmtp.annex_p_val_warp);
+
+ }
+ SDP_FREE(temp_ptr);
+ return (SDP_SUCCESS);
+ } else {
+ done = FALSE;
+ fmtp_ptr = src_ptr;
+ tmp[0] = '\0';
+ }
+
+ for (i=0; !done; i++) {
+ fmtp_p->fmtp_format = SDP_FMTP_NTE;
+ /* Look for comma separated events */
+ fmtp_ptr = sdp_getnextstrtok(fmtp_ptr, tmp, sizeof(tmp), ", \t", &result1);
+ if (result1 != SDP_SUCCESS) {
+ done = TRUE;
+ continue;
+ }
+ /* Now look for '-' separated range */
+ ptr2 = tmp;
+ low_val = (uint8_t)sdp_getnextnumtok(ptr2, (const char **)&ptr2,
+ "- \t", &result1);
+ if (*ptr2 == '-') {
+ high_val = (uint8_t)sdp_getnextnumtok(ptr2, (const char **)&ptr2,
+ "- \t", &result2);
+ } else {
+ high_val = low_val;
+ }
+
+ if ((result1 != SDP_SUCCESS) || (result2 != SDP_SUCCESS)) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: Invalid named events specified for fmtp attribute.",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ SDP_FREE(temp_ptr);
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ for (i = low_val; i <= high_val; i++) {
+ mapword = i/SDP_NE_BITS_PER_WORD;
+ bmap = SDP_NE_BIT_0 << (i%32);
+ fmtp_p->bmap[mapword] |= bmap;
+ }
+ if (high_val > fmtp_p->maxval) {
+ fmtp_p->maxval = high_val;
+ }
+ }
+
+ if (fmtp_p->maxval == 0) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: No named events specified for fmtp attribute.",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ SDP_FREE(temp_ptr);
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Parsed a=%s, payload type %u, ", sdp_p->debug_str,
+ sdp_get_attr_name(attr_p->type),
+ attr_p->attr.fmtp.payload_num);
+ }
+ SDP_FREE(temp_ptr);
+ return (SDP_SUCCESS);
+}
+
+sdp_result_e
+sdp_build_attr_fmtp_params (sdp_t *sdp_p, sdp_fmtp_t *fmtp_p, flex_string *fs)
+{
+ uint16_t event_id;
+ uint32_t mask;
+ uint32_t mapword;
+ uint8_t min = 0;
+ uint8_t max = 0;
+ tinybool range_start = FALSE;
+ tinybool range_end = FALSE;
+ tinybool semicolon = FALSE;
+
+ switch (fmtp_p->fmtp_format) {
+ case SDP_FMTP_MODE:
+ sdp_append_name_and_unsigned(fs, "mode", fmtp_p->mode, FALSE);
+ break;
+
+ case SDP_FMTP_CODEC_INFO:
+ FMTP_BUILD_UNSIGNED(fmtp_p->bitrate > 0, "bitrate", fmtp_p->bitrate)
+
+ FMTP_BUILD_STRING(fmtp_p->annexa_required,
+ "annexa", (fmtp_p->annexa ? "yes" : "no"))
+
+ FMTP_BUILD_STRING(fmtp_p->annexb_required,
+ "annexb", (fmtp_p->annexa ? "yes" : "no"))
+
+ FMTP_BUILD_UNSIGNED(fmtp_p->qcif > 0, "QCIF", fmtp_p->qcif)
+
+ FMTP_BUILD_UNSIGNED(fmtp_p->cif > 0, "CIF", fmtp_p->cif)
+
+ FMTP_BUILD_UNSIGNED(fmtp_p->maxbr > 0, "MAXBR", fmtp_p->maxbr)
+
+ FMTP_BUILD_UNSIGNED(fmtp_p->sqcif > 0, "SQCIF", fmtp_p->sqcif)
+
+ FMTP_BUILD_UNSIGNED(fmtp_p->cif4 > 0, "CIF4", fmtp_p->cif4)
+
+ FMTP_BUILD_UNSIGNED(fmtp_p->cif16 > 0, "CIF16", fmtp_p->cif16)
+
+ if ((fmtp_p->custom_x > 0) && (fmtp_p->custom_y > 0) &&
+ (fmtp_p->custom_mpi > 0)) {
+ flex_string_sprintf(fs, "%sCUSTOM=%u,%u,%u",
+ semicolon ? ";" : "",
+ fmtp_p->custom_x,
+ fmtp_p->custom_y,
+ fmtp_p->custom_mpi);
+
+ semicolon = TRUE;
+ }
+
+ if ((fmtp_p->par_height > 0) && (fmtp_p->par_width > 0)) {
+ flex_string_sprintf(fs, "%sPAR=%u:%u",
+ semicolon ? ";" : "",
+ fmtp_p->par_width,
+ fmtp_p->par_width);
+
+ semicolon = TRUE;
+ }
+
+ FMTP_BUILD_UNSIGNED(fmtp_p->cpcf > 0, "CPCF", fmtp_p->cpcf)
+
+ FMTP_BUILD_UNSIGNED(fmtp_p->bpp > 0, "BPP", fmtp_p->bpp)
+
+ FMTP_BUILD_UNSIGNED(fmtp_p->hrd > 0, "HRD", fmtp_p->hrd)
+
+ FMTP_BUILD_UNSIGNED(fmtp_p->profile >= 0, "PROFILE", fmtp_p->profile)
+
+ FMTP_BUILD_UNSIGNED(fmtp_p->level >= 0, "LEVEL", fmtp_p->level)
+
+ FMTP_BUILD_FLAG(fmtp_p->is_interlace, "INTERLACE")
+
+ FMTP_BUILD_FLAG(fmtp_p->annex_d, "D")
+
+ FMTP_BUILD_FLAG(fmtp_p->annex_f, "F")
+
+ FMTP_BUILD_FLAG(fmtp_p->annex_i, "I")
+
+ FMTP_BUILD_FLAG(fmtp_p->annex_j, "J")
+
+ FMTP_BUILD_FLAG(fmtp_p->annex_t, "T")
+
+ FMTP_BUILD_UNSIGNED(fmtp_p->annex_k_val > 0,
+ "K", fmtp_p->annex_k_val)
+
+ FMTP_BUILD_UNSIGNED(fmtp_p->annex_n_val > 0,
+ "N", fmtp_p->annex_n_val)
+
+ if ((fmtp_p->annex_p_val_picture_resize > 0) &&
+ (fmtp_p->annex_p_val_warp > 0)) {
+ flex_string_sprintf(fs, "%sP=%d:%d",
+ semicolon ? ";" : "",
+ fmtp_p->annex_p_val_picture_resize,
+ fmtp_p->annex_p_val_warp);
+
+ semicolon = TRUE;
+ }
+
+ FMTP_BUILD_STRING(strlen(fmtp_p->profile_level_id) > 0,
+ "profile-level-id", fmtp_p->profile_level_id)
+
+ FMTP_BUILD_STRING(strlen(fmtp_p->parameter_sets) > 0,
+ "sprop-parameter-sets", fmtp_p->parameter_sets)
+
+ FMTP_BUILD_UNSIGNED(
+ fmtp_p->packetization_mode < SDP_MAX_PACKETIZATION_MODE_VALUE,
+ "packetization-mode", fmtp_p->packetization_mode)
+
+ FMTP_BUILD_UNSIGNED(
+ fmtp_p->level_asymmetry_allowed <=
+ SDP_MAX_LEVEL_ASYMMETRY_ALLOWED_VALUE,
+ "level-asymmetry-allowed", fmtp_p->level_asymmetry_allowed)
+
+ FMTP_BUILD_UNSIGNED(fmtp_p->interleaving_depth > 0,
+ "sprop-interleaving-depth", fmtp_p->interleaving_depth)
+
+ FMTP_BUILD_UNSIGNED(fmtp_p->flag & SDP_DEINT_BUF_REQ_FLAG,
+ "sprop-deint-buf-req", fmtp_p->deint_buf_req)
+
+ FMTP_BUILD_UNSIGNED(fmtp_p->max_don_diff > 0,
+ "sprop-max-don-diff", fmtp_p->max_don_diff)
+
+ FMTP_BUILD_UNSIGNED(fmtp_p->flag & SDP_INIT_BUF_TIME_FLAG,
+ "sprop-init-buf-time", fmtp_p->init_buf_time)
+
+ FMTP_BUILD_UNSIGNED(fmtp_p->max_mbps > 0,
+ "max-mbps", fmtp_p->max_mbps)
+
+ FMTP_BUILD_UNSIGNED(fmtp_p->max_fs > 0, "max-fs", fmtp_p->max_fs)
+
+ FMTP_BUILD_UNSIGNED(fmtp_p->max_fr > 0, "max-fr", fmtp_p->max_fr)
+
+ FMTP_BUILD_UNSIGNED(fmtp_p->max_cpb > 0, "max-cpb", fmtp_p->max_cpb)
+
+ FMTP_BUILD_UNSIGNED(fmtp_p->max_dpb > 0, "max-dpb", fmtp_p->max_dpb)
+
+ FMTP_BUILD_UNSIGNED(fmtp_p->max_br > 0, "max-br", fmtp_p->max_br)
+
+ FMTP_BUILD_UNSIGNED(fmtp_p->redundant_pic_cap > 0,
+ "redundant-pic-cap", fmtp_p->redundant_pic_cap)
+
+ FMTP_BUILD_UNSIGNED(fmtp_p->flag & SDP_DEINT_BUF_CAP_FLAG,
+ "deint-buf-cap", fmtp_p->deint_buf_cap)
+
+ FMTP_BUILD_UNSIGNED(fmtp_p->flag & SDP_MAX_RCMD_NALU_SIZE_FLAG,
+ "max-rcmd-naFMTP_BUILD_FLlu-size", fmtp_p->max_rcmd_nalu_size)
+
+ FMTP_BUILD_UNSIGNED(fmtp_p->parameter_add <= 1, "parameter-add",
+ fmtp_p->parameter_add)
+
+ FMTP_BUILD_UNSIGNED(fmtp_p->maxaveragebitrate > 0,
+ "maxaveragebitrate", fmtp_p->maxaveragebitrate)
+
+ FMTP_BUILD_UNSIGNED(fmtp_p->usedtx <= 1, "usedtx", fmtp_p->usedtx)
+
+ FMTP_BUILD_UNSIGNED(fmtp_p->stereo <= 1, "stereo", fmtp_p->stereo)
+
+ FMTP_BUILD_UNSIGNED(fmtp_p->useinbandfec <= 1,
+ "useinbandfec", fmtp_p->useinbandfec)
+
+ FMTP_BUILD_STRING(strlen(fmtp_p->maxcodedaudiobandwidth) > 0,
+ "maxcodedaudiobandwidth", fmtp_p->maxcodedaudiobandwidth)
+
+ FMTP_BUILD_UNSIGNED(fmtp_p->cbr <= 1, "cbr", fmtp_p->cbr)
+
+ break;
+
+ case SDP_FMTP_NTE:
+ default:
+ break;
+ }
+
+ for(event_id = 0, mapword = 0, mask = SDP_NE_BIT_0;
+ event_id <= fmtp_p->maxval;
+ event_id++, mapword = event_id/SDP_NE_BITS_PER_WORD ) {
+
+ if (event_id % SDP_NE_BITS_PER_WORD) {
+ mask <<= 1;
+ } else {
+ /* crossed a bitmap word boundary */
+ mask = SDP_NE_BIT_0;
+ if (!range_start && !range_end && !fmtp_p->bmap[mapword]) {
+ /* no events in this word, skip to the last event id
+ * in this bitmap word. */
+ event_id += SDP_NE_BITS_PER_WORD - 1;
+ continue;
+ }
+ }
+
+ if (fmtp_p->bmap[mapword] & mask) {
+ if (!range_start) {
+ range_start = TRUE;
+ min = max = (uint8_t)event_id;
+ } else {
+ max = (uint8_t)event_id;
+ }
+ range_end = (max == fmtp_p->maxval);
+ } else {
+ /* If we were in the middle of a range, then we've hit the
+ * end. If we weren't, there is no end to hit. */
+ range_end = range_start;
+ }
+
+ /* If this is the end of the range, print it to the string. */
+ if (range_end) {
+ range_start = range_end = FALSE;
+
+ flex_string_sprintf(fs, "%u", min);
+
+ if (min != max) {
+ flex_string_sprintf(fs, "-%u", max);
+ }
+
+ if (max != fmtp_p->maxval) {
+ flex_string_append(fs, ",");
+ }
+ }
+ }
+ return SDP_SUCCESS;
+}
+
+sdp_result_e sdp_build_attr_fmtp (sdp_t *sdp_p, sdp_attr_t *attr_p, flex_string *fs)
+{
+ sdp_fmtp_t *fmtp_p;
+ sdp_result_e result;
+
+ flex_string_sprintf(fs, "a=%s:%u ",
+ sdp_attr[attr_p->type].name,
+ attr_p->attr.fmtp.payload_num);
+
+ fmtp_p = &(attr_p->attr.fmtp);
+
+ result = sdp_build_attr_fmtp_params(sdp_p, fmtp_p, fs);
+
+ if (result != SDP_SUCCESS) {
+ return result;
+ }
+
+ flex_string_append(fs, "\r\n");
+
+ return SDP_SUCCESS;
+}
+
+sdp_result_e sdp_parse_attr_sctpmap(sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr)
+{
+ sdp_result_e result = SDP_SUCCESS;
+ char tmp[SDP_MAX_STRING_LEN];
+ uint32_t streams;
+
+ /* Find the payload type number. */
+ attr_p->attr.sctpmap.port = (uint16_t)sdp_getnextnumtok(ptr, &ptr,
+ " \t", &result);
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: no sctpmap port number",
+ sdp_p->debug_str);
+ return SDP_INVALID_PARAMETER;
+ }
+
+ ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), " \t", &result);
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: No sctpmap protocol specified.",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return SDP_INVALID_PARAMETER;
+ }
+ sstrncpy(attr_p->attr.sctpmap.protocol, tmp,
+ sizeof (attr_p->attr.sctpmap.protocol));
+
+ streams = sdp_getnextnumtok(ptr, &ptr, " \t", &result);
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: No sctpmap streams specified.",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return SDP_INVALID_PARAMETER;
+ }
+
+ attr_p->attr.sctpmap.streams = streams;
+
+ return SDP_SUCCESS;
+}
+
+sdp_result_e sdp_build_attr_sctpmap(sdp_t *sdp_p, sdp_attr_t *attr_p,
+ flex_string *fs)
+{
+ flex_string_sprintf(fs, "a=%s:%u %s %u\r\n",
+ sdp_attr[attr_p->type].name,
+ attr_p->attr.sctpmap.port,
+ attr_p->attr.sctpmap.protocol,
+ attr_p->attr.sctpmap.streams);
+
+ return SDP_SUCCESS;
+}
+
+sdp_result_e sdp_parse_attr_direction (sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr)
+{
+ /* No parameters to parse. */
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Parsed a=%s", sdp_p->debug_str,
+ sdp_get_attr_name(attr_p->type));
+ }
+
+ return (SDP_SUCCESS);
+}
+
+sdp_result_e sdp_build_attr_direction (sdp_t *sdp_p, sdp_attr_t *attr_p, flex_string *fs)
+{
+ flex_string_sprintf(fs, "a=%s\r\n", sdp_get_attr_name(attr_p->type));
+
+ return SDP_SUCCESS;
+}
+
+sdp_result_e sdp_parse_attr_qos (sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr)
+{
+ int i;
+ sdp_result_e result;
+ char tmp[SDP_MAX_STRING_LEN];
+
+ /* Find the strength tag. */
+ ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), " \t", &result);
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: No qos strength tag specified.",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+ attr_p->attr.qos.strength = SDP_QOS_STRENGTH_UNKNOWN;
+ for (i=0; i < SDP_MAX_QOS_STRENGTH; i++) {
+ if (cpr_strncasecmp(tmp, sdp_qos_strength[i].name,
+ sdp_qos_strength[i].strlen) == 0) {
+ attr_p->attr.qos.strength = (sdp_qos_strength_e)i;
+ }
+ }
+ if (attr_p->attr.qos.strength == SDP_QOS_STRENGTH_UNKNOWN) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: QOS strength tag unrecognized (%s)",
+ sdp_p->debug_str, tmp);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ /* Find the qos direction. */
+ ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), " \t", &result);
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: No qos direction specified.",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+ attr_p->attr.qos.direction = SDP_QOS_DIR_UNKNOWN;
+ for (i=0; i < SDP_MAX_QOS_DIR; i++) {
+ if (cpr_strncasecmp(tmp, sdp_qos_direction[i].name,
+ sdp_qos_direction[i].strlen) == 0) {
+ attr_p->attr.qos.direction = (sdp_qos_dir_e)i;
+ }
+ }
+ if (attr_p->attr.qos.direction == SDP_QOS_DIR_UNKNOWN) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: QOS direction unrecognized (%s)",
+ sdp_p->debug_str, tmp);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ /* See if confirm was specified. Defaults to FALSE. */
+ attr_p->attr.qos.confirm = FALSE;
+ ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), " \t", &result);
+ if (result == SDP_SUCCESS) {
+ if (cpr_strncasecmp(tmp, "confirm", sizeof("confirm")) == 0) {
+ attr_p->attr.qos.confirm = TRUE;
+ }
+ if (attr_p->attr.qos.confirm == FALSE) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: QOS confirm parameter invalid (%s)",
+ sdp_p->debug_str, tmp);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+ }
+
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Parsed a=%s, strength %s, direction %s, confirm %s",
+ sdp_p->debug_str, sdp_get_attr_name(attr_p->type),
+ sdp_get_qos_strength_name(attr_p->attr.qos.strength),
+ sdp_get_qos_direction_name(attr_p->attr.qos.direction),
+ (attr_p->attr.qos.confirm ? "set" : "not set"));
+ }
+
+ return (SDP_SUCCESS);
+}
+
+sdp_result_e sdp_build_attr_qos (sdp_t *sdp_p, sdp_attr_t *attr_p, flex_string *fs)
+{
+ flex_string_sprintf(fs, "a=%s:%s %s%s\r\n", sdp_attr[attr_p->type].name,
+ sdp_get_qos_strength_name(attr_p->attr.qos.strength),
+ sdp_get_qos_direction_name(attr_p->attr.qos.direction),
+ attr_p->attr.qos.confirm ? " confirm" : "");
+
+ return SDP_SUCCESS;
+}
+
/*
 * sdp_parse_attr_curr
 *
 * Parse an "a=curr:<type> <status-type> <direction>" attribute (current
 * status, RFC 3312-style preconditions) into attr_p.  Only the "qos" curr
 * type is accepted; an unknown type, missing token, or unknown direction
 * is a parse error that bumps num_invalid_param.
 *
 * NOTE(review): unlike type and direction, the status-type lookup result
 * is NOT validated — an unrecognized status type is stored as
 * SDP_QOS_STATUS_TYPE_UNKNOWN and parsing continues.  Looks intentional
 * (matches sdp_parse_attr_des below) — confirm before tightening.
 */
sdp_result_e sdp_parse_attr_curr (sdp_t *sdp_p, sdp_attr_t *attr_p,
                        const char *ptr)
{
    int i;
    sdp_result_e result;
    char tmp[SDP_MAX_STRING_LEN];

    /* Find the curr type tag. */
    ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), " \t", &result);
    if (result != SDP_SUCCESS) {
        sdp_parse_error(sdp_p,
            "%s Warning: No curr attr type specified.",
            sdp_p->debug_str);
        sdp_p->conf_p->num_invalid_param++;
        return (SDP_INVALID_PARAMETER);
    }
    /* Scan the whole table (no break); last matching entry wins. */
    attr_p->attr.curr.type = SDP_CURR_UNKNOWN_TYPE;
    for (i=0; i < SDP_MAX_CURR_TYPES; i++) {
        if (cpr_strncasecmp(tmp, sdp_curr_type[i].name,
                        sdp_curr_type[i].strlen) == 0) {
            attr_p->attr.curr.type = (sdp_curr_type_e)i;
        }
    }

    /* Only the qos curr type is supported; reject everything else. */
    if (attr_p->attr.curr.type != SDP_CURR_QOS_TYPE) {
        sdp_parse_error(sdp_p,
            "%s Warning: Unknown curr type.",
            sdp_p->debug_str);
        sdp_p->conf_p->num_invalid_param++;
        return (SDP_INVALID_PARAMETER);
    }

    /* Check qos status type */
    ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), " \t", &result);
    if (result != SDP_SUCCESS) {
        sdp_parse_error(sdp_p,
            "%s Warning: No curr attr type specified.",
            sdp_p->debug_str);
        sdp_p->conf_p->num_invalid_param++;
        return (SDP_INVALID_PARAMETER);
    }
    /* Unrecognized status types fall through as UNKNOWN (see NOTE above). */
    attr_p->attr.curr.status_type = SDP_QOS_STATUS_TYPE_UNKNOWN;
    for (i=0; i < SDP_MAX_QOS_STATUS_TYPES; i++) {
        if (cpr_strncasecmp(tmp, sdp_qos_status_type[i].name,
                        sdp_qos_status_type[i].strlen) == 0) {
            attr_p->attr.curr.status_type = (sdp_qos_status_types_e)i;
        }
    }


    /* Find the qos direction. */
    ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), " \t", &result);
    if (result != SDP_SUCCESS) {
        sdp_parse_error(sdp_p,
            "%s Warning: No qos direction specified.",
            sdp_p->debug_str);
        sdp_p->conf_p->num_invalid_param++;
        return (SDP_INVALID_PARAMETER);
    }
    attr_p->attr.curr.direction = SDP_QOS_DIR_UNKNOWN;
    for (i=0; i < SDP_MAX_QOS_DIR; i++) {
        if (cpr_strncasecmp(tmp, sdp_qos_direction[i].name,
                        sdp_qos_direction[i].strlen) == 0) {
            attr_p->attr.curr.direction = (sdp_qos_dir_e)i;
        }
    }
    if (attr_p->attr.curr.direction == SDP_QOS_DIR_UNKNOWN) {
        sdp_parse_error(sdp_p,
            "%s Warning: QOS direction unrecognized (%s)",
            sdp_p->debug_str, tmp);
        sdp_p->conf_p->num_invalid_param++;
        return (SDP_INVALID_PARAMETER);
    }

    if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
        SDP_PRINT("%s Parsed a=%s, type %s status type %s, direction %s",
                  sdp_p->debug_str, sdp_get_attr_name(attr_p->type),
                  sdp_get_curr_type_name(attr_p->attr.curr.type),
                  sdp_get_qos_status_type_name(attr_p->attr.curr.status_type),
                  sdp_get_qos_direction_name(attr_p->attr.curr.direction));
    }

    return (SDP_SUCCESS);
}
+
+sdp_result_e sdp_build_attr_curr (sdp_t *sdp_p, sdp_attr_t *attr_p, flex_string *fs)
+{
+ flex_string_sprintf(fs, "a=%s:%s %s %s\r\n",
+ sdp_attr[attr_p->type].name,
+ sdp_get_curr_type_name(attr_p->attr.curr.type),
+ sdp_get_qos_status_type_name(attr_p->attr.curr.status_type),
+ sdp_get_qos_direction_name(attr_p->attr.curr.direction));
+
+ return SDP_SUCCESS;
+}
+
+/*
+ * sdp_parse_attr_des
+ * Parse an a=des attribute (RFC 3312 desired-status):
+ *   a=des:<type> <strength> <status-type> <direction>
+ * Only the "qos" des type is accepted.  Any missing field, or an
+ * unrecognized type/strength/direction, bumps num_invalid_param and
+ * returns SDP_INVALID_PARAMETER.
+ */
+sdp_result_e sdp_parse_attr_des (sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr)
+{
+ int i;
+ sdp_result_e result;
+ char tmp[SDP_MAX_STRING_LEN];
+
+ /* Find the des type tag. */
+ ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), " \t", &result);
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: No des attr type specified.",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+ attr_p->attr.des.type = SDP_DES_UNKNOWN_TYPE;
+ /* Fix: bound the scan by the des-type table's own size.  The previous
+ * SDP_MAX_CURR_TYPES bound (copied from the curr parser) could read
+ * past the end of sdp_des_type[]. */
+ for (i=0; i < SDP_MAX_DES_TYPES; i++) {
+ if (cpr_strncasecmp(tmp, sdp_des_type[i].name,
+ sdp_des_type[i].strlen) == 0) {
+ attr_p->attr.des.type = (sdp_des_type_e)i;
+ }
+ }
+
+ if (attr_p->attr.des.type != SDP_DES_QOS_TYPE) {
+ /* Fix: this is the des parser; the message previously said "conf". */
+ sdp_parse_error(sdp_p,
+ "%s Warning: Unknown des type.",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ /* Find the strength tag. */
+ ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), " \t", &result);
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: No qos strength tag specified.",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+ attr_p->attr.des.strength = SDP_QOS_STRENGTH_UNKNOWN;
+ for (i=0; i < SDP_MAX_QOS_STRENGTH; i++) {
+ if (cpr_strncasecmp(tmp, sdp_qos_strength[i].name,
+ sdp_qos_strength[i].strlen) == 0) {
+ attr_p->attr.des.strength = (sdp_qos_strength_e)i;
+ }
+ }
+ if (attr_p->attr.des.strength == SDP_QOS_STRENGTH_UNKNOWN) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: QOS strength tag unrecognized (%s)",
+ sdp_p->debug_str, tmp);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ /* Check qos status type.  An unrecognized status type is tolerated
+ * and simply left as SDP_QOS_STATUS_TYPE_UNKNOWN. */
+ ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), " \t", &result);
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: No des attr type specified.",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+ attr_p->attr.des.status_type = SDP_QOS_STATUS_TYPE_UNKNOWN;
+ for (i=0; i < SDP_MAX_QOS_STATUS_TYPES; i++) {
+ if (cpr_strncasecmp(tmp, sdp_qos_status_type[i].name,
+ sdp_qos_status_type[i].strlen) == 0) {
+ attr_p->attr.des.status_type = (sdp_qos_status_types_e)i;
+ }
+ }
+
+ /* Find the qos direction; unlike the status type this must be
+ * recognized. */
+ ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), " \t", &result);
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: No qos direction specified.",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+ attr_p->attr.des.direction = SDP_QOS_DIR_UNKNOWN;
+ for (i=0; i < SDP_MAX_QOS_DIR; i++) {
+ if (cpr_strncasecmp(tmp, sdp_qos_direction[i].name,
+ sdp_qos_direction[i].strlen) == 0) {
+ attr_p->attr.des.direction = (sdp_qos_dir_e)i;
+ }
+ }
+ if (attr_p->attr.des.direction == SDP_QOS_DIR_UNKNOWN) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: QOS direction unrecognized (%s)",
+ sdp_p->debug_str, tmp);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ /* Fix: trace the strength via attr.des, the union member populated
+ * above (attr.qos aliases the same storage but is the wrong member
+ * for this attribute). */
+ SDP_PRINT("%s Parsed a=%s, type %s strength %s status type %s, direction %s",
+ sdp_p->debug_str, sdp_get_attr_name(attr_p->type),
+ sdp_get_des_type_name(attr_p->attr.des.type),
+ sdp_get_qos_strength_name(attr_p->attr.des.strength),
+ sdp_get_qos_status_type_name(attr_p->attr.des.status_type),
+ sdp_get_qos_direction_name(attr_p->attr.des.direction));
+ }
+
+ return (SDP_SUCCESS);
+}
+
+
+/*
+ * sdp_build_attr_des
+ * Serialize an a=des attribute: "a=<name>:<type> <strength>
+ * <status type> <direction>".  Always reports success.
+ */
+sdp_result_e sdp_build_attr_des (sdp_t *sdp_p, sdp_attr_t *attr_p, flex_string *fs)
+{
+ flex_string_sprintf(fs, "a=%s:%s %s %s %s\r\n",
+ sdp_attr[attr_p->type].name,
+ /* Fix: look the des type up in the des-type name table.  The
+ * previous code cast it to sdp_curr_type_e and used the curr
+ * table, matching the parser's trace only by coincidence of
+ * enum layout. */
+ sdp_get_des_type_name(attr_p->attr.des.type),
+ sdp_get_qos_strength_name(attr_p->attr.des.strength),
+ sdp_get_qos_status_type_name(attr_p->attr.des.status_type),
+ sdp_get_qos_direction_name(attr_p->attr.des.direction));
+
+ return SDP_SUCCESS;
+}
+
+/*
+ * sdp_parse_attr_conf
+ * Parse an a=conf attribute (RFC 3312 confirmation-status):
+ *   a=conf:<type> <status-type> <direction>
+ * Only the "qos" conf type is accepted.  Any missing field or an
+ * unrecognized direction bumps num_invalid_param and returns
+ * SDP_INVALID_PARAMETER.
+ */
+sdp_result_e sdp_parse_attr_conf (sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr)
+{
+ int i;
+ sdp_result_e result;
+ char tmp[SDP_MAX_STRING_LEN];
+
+ /* Find the conf type tag. */
+ ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), " \t", &result);
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: No conf attr type specified.",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+ attr_p->attr.conf.type = SDP_CONF_UNKNOWN_TYPE;
+ /* Fix: bound the scan by the conf-type table's own size.  The previous
+ * SDP_MAX_CURR_TYPES bound (copied from the curr parser) could read
+ * past the end of sdp_conf_type[]. */
+ for (i=0; i < SDP_MAX_CONF_TYPES; i++) {
+ if (cpr_strncasecmp(tmp, sdp_conf_type[i].name,
+ sdp_conf_type[i].strlen) == 0) {
+ attr_p->attr.conf.type = (sdp_conf_type_e)i;
+ }
+ }
+
+ if (attr_p->attr.conf.type != SDP_CONF_QOS_TYPE) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: Unknown conf type.",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ /* Check qos status type.  An unrecognized status type is tolerated
+ * and simply left as SDP_QOS_STATUS_TYPE_UNKNOWN. */
+ ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), " \t", &result);
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: No conf attr type specified.",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+ attr_p->attr.conf.status_type = SDP_QOS_STATUS_TYPE_UNKNOWN;
+ for (i=0; i < SDP_MAX_QOS_STATUS_TYPES; i++) {
+ if (cpr_strncasecmp(tmp, sdp_qos_status_type[i].name,
+ sdp_qos_status_type[i].strlen) == 0) {
+ attr_p->attr.conf.status_type = (sdp_qos_status_types_e)i;
+ }
+ }
+
+ /* Find the qos direction; this one must be recognized. */
+ ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), " \t", &result);
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: No qos direction specified.",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+ attr_p->attr.conf.direction = SDP_QOS_DIR_UNKNOWN;
+ for (i=0; i < SDP_MAX_QOS_DIR; i++) {
+ if (cpr_strncasecmp(tmp, sdp_qos_direction[i].name,
+ sdp_qos_direction[i].strlen) == 0) {
+ attr_p->attr.conf.direction = (sdp_qos_dir_e)i;
+ }
+ }
+ if (attr_p->attr.conf.direction == SDP_QOS_DIR_UNKNOWN) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: QOS direction unrecognized (%s)",
+ sdp_p->debug_str, tmp);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Parsed a=%s, type %s status type %s, direction %s",
+ sdp_p->debug_str, sdp_get_attr_name(attr_p->type),
+ sdp_get_conf_type_name(attr_p->attr.conf.type),
+ sdp_get_qos_status_type_name(attr_p->attr.conf.status_type),
+ sdp_get_qos_direction_name(attr_p->attr.conf.direction));
+ }
+
+ return (SDP_SUCCESS);
+}
+
+sdp_result_e sdp_build_attr_conf (sdp_t *sdp_p, sdp_attr_t *attr_p, flex_string *fs)
+{
+ flex_string_sprintf(fs, "a=%s:%s %s %s\r\n",
+ sdp_attr[attr_p->type].name,
+ sdp_get_conf_type_name(attr_p->attr.conf.type),
+ sdp_get_qos_status_type_name(attr_p->attr.conf.status_type),
+ sdp_get_qos_direction_name(attr_p->attr.conf.direction));
+
+ return SDP_SUCCESS;
+}
+
+/*
+ * Parse a rtpmap or a sprtmap. Both formats use the same structure
+ * the only difference being the keyword "rtpmap" vs "sprtmap". The
+ * rtpmap field in the sdp_attr_t is used to store both mappings.
+ */
+/*
+ * sdp_parse_attr_transport_map
+ * Parse the value of a rtpmap/sprtmap attribute:
+ *   <payload> <encoding-name>/<clockrate>[/<channels>]
+ * A missing clockrate is tolerated (defaulted to 8000); the channel
+ * count is optional and defaults to 1.
+ */
+sdp_result_e sdp_parse_attr_transport_map (sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr)
+{
+ sdp_result_e result;
+
+ /* Reset all fields to defaults before parsing. */
+ attr_p->attr.transport_map.payload_num = 0;
+ attr_p->attr.transport_map.encname[0] = '\0';
+ attr_p->attr.transport_map.clockrate = 0;
+ attr_p->attr.transport_map.num_chan = 1;
+
+ /* Find the payload type number. */
+ attr_p->attr.transport_map.payload_num =
+ (uint16_t)sdp_getnextnumtok(ptr, &ptr, " \t", &result);
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: Invalid payload type specified for %s attribute.",
+ sdp_p->debug_str, sdp_get_attr_name(attr_p->type));
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ /* Find the encoding name ('/' is a delimiter so the clockrate is left
+ * in the input). */
+ ptr = sdp_getnextstrtok(ptr, attr_p->attr.transport_map.encname,
+ sizeof(attr_p->attr.transport_map.encname), "/ \t", &result);
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: No encoding name specified in %s attribute.",
+ sdp_p->debug_str, sdp_get_attr_name(attr_p->type));
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ /* Find the clockrate.  Failure here is only warned about, not treated
+ * as a parse error. */
+ attr_p->attr.transport_map.clockrate =
+ sdp_getnextnumtok(ptr, &ptr, "/ \t", &result);
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: No clockrate specified for "
+ "%s attribute, set to default of 8000.",
+ sdp_p->debug_str, sdp_get_attr_name(attr_p->type));
+ attr_p->attr.transport_map.clockrate = 8000;
+ }
+
+ /* Find the number of channels, if specified. This is optional. */
+ if (*ptr == '/') {
+ /* If a '/' exists, expect something valid beyond it. */
+ attr_p->attr.transport_map.num_chan =
+ (uint16_t)sdp_getnextnumtok(ptr, &ptr, "/ \t", &result);
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: Invalid number of channels parameter"
+ " for rtpmap attribute.", sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+ }
+
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Parsed a=%s, payload type %u, encoding name %s, "
+ "clockrate %u", sdp_p->debug_str,
+ sdp_get_attr_name(attr_p->type),
+ attr_p->attr.transport_map.payload_num,
+ attr_p->attr.transport_map.encname,
+ attr_p->attr.transport_map.clockrate);
+ if (attr_p->attr.transport_map.num_chan != 1) {
+ SDP_PRINT("/%u", attr_p->attr.transport_map.num_chan);
+ }
+ }
+
+ return (SDP_SUCCESS);
+}
+
+/*
+ * Build a rtpmap or a sprtmap. Both formats use the same structure
+ * the only difference being the keyword "rtpmap" vs "sprtmap". The
+ * rtpmap field in the sdp_attr_t is used for both mappings.
+ */
+/*
+ * Build a rtpmap or a sprtmap line.  Both keywords share the
+ * transport_map storage; the channel count is emitted only when it
+ * differs from the default of one.
+ */
+sdp_result_e sdp_build_attr_transport_map (sdp_t *sdp_p, sdp_attr_t *attr_p,
+ flex_string *fs)
+{
+ const char *attr_name = sdp_attr[attr_p->type].name;
+
+ if (attr_p->attr.transport_map.num_chan != 1) {
+ flex_string_sprintf(fs, "a=%s:%u %s/%u/%u\r\n", attr_name,
+ attr_p->attr.transport_map.payload_num,
+ attr_p->attr.transport_map.encname,
+ attr_p->attr.transport_map.clockrate,
+ attr_p->attr.transport_map.num_chan);
+ } else {
+ flex_string_sprintf(fs, "a=%s:%u %s/%u\r\n", attr_name,
+ attr_p->attr.transport_map.payload_num,
+ attr_p->attr.transport_map.encname,
+ attr_p->attr.transport_map.clockrate);
+ }
+
+ return SDP_SUCCESS;
+}
+
+/*
+ * sdp_parse_attr_subnet
+ * Parse an a=subnet attribute: "<nettype> <addrtype> <addr>[/<prefix>]".
+ * A network/address type is accepted only if its name matches AND it is
+ * enabled in the application config (nettype_supported/
+ * addrtype_supported); otherwise it is reported as unsupported.
+ */
+sdp_result_e sdp_parse_attr_subnet (sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr)
+{
+ int i;
+ char *slash_ptr;
+ sdp_result_e result;
+ tinybool type_found = FALSE;
+ char tmp[SDP_MAX_STRING_LEN];
+
+ /* Find the subnet network type. */
+ ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), " \t", &result);
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: No network type specified in subnet attribute.",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+ attr_p->attr.subnet.nettype = SDP_NT_UNSUPPORTED;
+ /* type_found is set and cleared within each iteration: a name match
+ * only takes effect when the matching type is enabled in the config. */
+ for (i=0; i < SDP_MAX_NETWORK_TYPES; i++) {
+ if (cpr_strncasecmp(tmp, sdp_nettype[i].name,
+ sdp_nettype[i].strlen) == 0) {
+ type_found = TRUE;
+ }
+ if (type_found == TRUE) {
+ if (sdp_p->conf_p->nettype_supported[i] == TRUE) {
+ attr_p->attr.subnet.nettype = (sdp_nettype_e)i;
+ }
+ type_found = FALSE;
+ }
+ }
+ if (attr_p->attr.subnet.nettype == SDP_NT_UNSUPPORTED) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: Subnet network type unsupported (%s).",
+ sdp_p->debug_str, tmp);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ /* Find the subnet address type (same match-and-check pattern). */
+ ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), " \t", &result);
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: No address type specified in subnet attribute.",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+ attr_p->attr.subnet.addrtype = SDP_AT_UNSUPPORTED;
+ for (i=0; i < SDP_MAX_ADDR_TYPES; i++) {
+ if (cpr_strncasecmp(tmp, sdp_addrtype[i].name,
+ sdp_addrtype[i].strlen) == 0) {
+ type_found = TRUE;
+ }
+ if (type_found == TRUE) {
+ if (sdp_p->conf_p->addrtype_supported[i] == TRUE) {
+ attr_p->attr.subnet.addrtype = (sdp_addrtype_e)i;
+ }
+ type_found = FALSE;
+ }
+ }
+ if (attr_p->attr.subnet.addrtype == SDP_AT_UNSUPPORTED) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: Subnet address type unsupported (%s).",
+ sdp_p->debug_str, tmp);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ /* Find the subnet address. */
+ ptr = sdp_getnextstrtok(ptr, attr_p->attr.subnet.addr,
+ sizeof(attr_p->attr.subnet.addr), " \t", &result);
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: No subnet address specified in "
+ "subnet attribute.", sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+ /* An optional "/<prefix>" may trail the address; split it off in
+ * place by terminating the address at the slash. */
+ slash_ptr = sdp_findchar(attr_p->attr.subnet.addr, "/");
+ if (*slash_ptr == '/') {
+ *slash_ptr++ = '\0';
+ /* If the '/' exists, expect a valid prefix to follow. */
+ attr_p->attr.subnet.prefix = sdp_getnextnumtok(slash_ptr,
+ (const char **)&slash_ptr,
+ " \t", &result);
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: Invalid subnet prefix specified in "
+ "subnet attribute.", sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+ } else {
+ /* No prefix present; SDP_INVALID_VALUE marks it absent for the
+ * builder. */
+ attr_p->attr.subnet.prefix = SDP_INVALID_VALUE;
+ }
+
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Parsed a=%s, network %s, addr type %s, address %s ",
+ sdp_p->debug_str, sdp_get_attr_name(attr_p->type),
+ sdp_get_network_name(attr_p->attr.subnet.nettype),
+ sdp_get_address_name(attr_p->attr.subnet.addrtype),
+ attr_p->attr.subnet.addr);
+ if (attr_p->attr.subnet.prefix != SDP_INVALID_VALUE) {
+ SDP_PRINT("/%u", (ushort)attr_p->attr.subnet.prefix);
+ }
+ }
+
+ return (SDP_SUCCESS);
+}
+
+/*
+ * sdp_build_attr_subnet
+ * Serialize an a=subnet attribute; the "/<prefix>" suffix is appended
+ * only when a prefix was recorded (prefix != SDP_INVALID_VALUE).
+ */
+sdp_result_e sdp_build_attr_subnet (sdp_t *sdp_p, sdp_attr_t *attr_p,
+ flex_string *fs)
+{
+ const char *net_name = sdp_get_network_name(attr_p->attr.subnet.nettype);
+ const char *addr_name = sdp_get_address_name(attr_p->attr.subnet.addrtype);
+
+ if (attr_p->attr.subnet.prefix != SDP_INVALID_VALUE) {
+ flex_string_sprintf(fs, "a=%s:%s %s %s/%u\r\n",
+ sdp_attr[attr_p->type].name,
+ net_name, addr_name,
+ attr_p->attr.subnet.addr,
+ (ushort)attr_p->attr.subnet.prefix);
+ } else {
+ flex_string_sprintf(fs, "a=%s:%s %s %s\r\n",
+ sdp_attr[attr_p->type].name,
+ net_name, addr_name,
+ attr_p->attr.subnet.addr);
+ }
+
+ return SDP_SUCCESS;
+}
+
+/*
+ * sdp_parse_attr_t38_ratemgmt
+ * Parse a t38 rate-management attribute value.  A missing token is an
+ * error; an unrecognized rate name is tolerated and recorded as
+ * SDP_T38_UNKNOWN_RATE.
+ */
+sdp_result_e sdp_parse_attr_t38_ratemgmt (sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr)
+{
+ int i;
+ sdp_result_e result;
+ char tmp[SDP_MAX_STRING_LEN];
+
+ /* Find the rate mgmt. */
+ ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), " \t", &result);
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: No t38 rate management specified.",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+ attr_p->attr.t38ratemgmt = SDP_T38_UNKNOWN_RATE;
+ for (i=0; i < SDP_T38_MAX_RATES; i++) {
+ if (cpr_strncasecmp(tmp, sdp_t38_rate[i].name,
+ sdp_t38_rate[i].strlen) == 0) {
+ attr_p->attr.t38ratemgmt = (sdp_t38_ratemgmt_e)i;
+ }
+ }
+
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Parsed a=%s, rate %s", sdp_p->debug_str,
+ sdp_get_attr_name(attr_p->type),
+ sdp_get_t38_ratemgmt_name(attr_p->attr.t38ratemgmt));
+ }
+
+ return (SDP_SUCCESS);
+}
+
+/*
+ * sdp_build_attr_t38_ratemgmt
+ * Serialize a t38 rate-management attribute.  Always reports success.
+ */
+sdp_result_e sdp_build_attr_t38_ratemgmt (sdp_t *sdp_p, sdp_attr_t *attr_p,
+ flex_string *fs)
+{
+ const char *rate_name = sdp_get_t38_ratemgmt_name(attr_p->attr.t38ratemgmt);
+
+ flex_string_sprintf(fs, "a=%s:%s\r\n",
+ sdp_attr[attr_p->type].name, rate_name);
+
+ return SDP_SUCCESS;
+}
+
+/*
+ * sdp_parse_attr_t38_udpec
+ * Parse a t38 UDP error-correction attribute value.  A missing token is
+ * an error; an unrecognized name is tolerated and recorded as
+ * SDP_T38_UDPEC_UNKNOWN.
+ */
+sdp_result_e sdp_parse_attr_t38_udpec (sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr)
+{
+ int i;
+ sdp_result_e result;
+ char tmp[SDP_MAX_STRING_LEN];
+
+ /* Find the udpec. */
+ ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), " \t", &result);
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: No t38 udpEC specified.",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+ attr_p->attr.t38udpec = SDP_T38_UDPEC_UNKNOWN;
+ for (i=0; i < SDP_T38_MAX_UDPEC; i++) {
+ if (cpr_strncasecmp(tmp, sdp_t38_udpec[i].name,
+ sdp_t38_udpec[i].strlen) == 0) {
+ attr_p->attr.t38udpec = (sdp_t38_udpec_e)i;
+ }
+ }
+
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Parsed a=%s, udpec %s", sdp_p->debug_str,
+ sdp_get_attr_name(attr_p->type),
+ sdp_get_t38_udpec_name(attr_p->attr.t38udpec));
+ }
+
+ return (SDP_SUCCESS);
+}
+
+/*
+ * sdp_build_attr_t38_udpec
+ * Serialize a t38 UDP error-correction attribute.  Always reports
+ * success.
+ */
+sdp_result_e sdp_build_attr_t38_udpec (sdp_t *sdp_p, sdp_attr_t *attr_p,
+ flex_string *fs)
+{
+ const char *udpec_name = sdp_get_t38_udpec_name(attr_p->attr.t38udpec);
+
+ flex_string_sprintf(fs, "a=%s:%s\r\n",
+ sdp_attr[attr_p->type].name, udpec_name);
+
+ return SDP_SUCCESS;
+}
+
+/*
+ * sdp_parse_attr_pc_codec
+ * Parse a whitespace-separated list of numeric payload types.  At least
+ * one payload must be present or the attribute is rejected.
+ * NOTE(review): num_payloads is incremented without being reset here —
+ * assumes the attribute structure was zero-initialized by the caller;
+ * TODO confirm.
+ */
+sdp_result_e sdp_parse_attr_pc_codec (sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr)
+{
+ uint16_t i;
+ sdp_result_e result;
+
+ /* Collect numeric tokens until one fails to parse or the payload
+ * table is full. */
+ for (i=0; i < SDP_MAX_PAYLOAD_TYPES; i++) {
+ attr_p->attr.pccodec.payload_type[i] = (ushort)sdp_getnextnumtok(ptr, &ptr,
+ " \t", &result);
+ if (result != SDP_SUCCESS) {
+ break;
+ }
+ attr_p->attr.pccodec.num_payloads++;
+ }
+
+ if (attr_p->attr.pccodec.num_payloads == 0) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: No payloads specified for %s attr.",
+ sdp_p->debug_str, sdp_attr[attr_p->type].name);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Parsed a=%s, num payloads %u, payloads: ",
+ sdp_p->debug_str, sdp_get_attr_name(attr_p->type),
+ attr_p->attr.pccodec.num_payloads);
+ for (i=0; i < attr_p->attr.pccodec.num_payloads; i++) {
+ SDP_PRINT("%u ", attr_p->attr.pccodec.payload_type[i]);
+ }
+ }
+
+ return (SDP_SUCCESS);
+}
+
+/*
+ * sdp_build_attr_pc_codec
+ * Serialize the payload list as "a=<name>: <pt> <pt> ... " plus CRLF,
+ * mirroring the parser's format.  Always reports success.
+ */
+sdp_result_e sdp_build_attr_pc_codec (sdp_t *sdp_p, sdp_attr_t *attr_p,
+ flex_string *fs)
+{
+ int idx;
+
+ flex_string_sprintf(fs, "a=%s: ", sdp_attr[attr_p->type].name);
+
+ for (idx = 0; idx < attr_p->attr.pccodec.num_payloads; idx++) {
+ flex_string_sprintf(fs, "%u ", attr_p->attr.pccodec.payload_type[idx]);
+ }
+
+ flex_string_append(fs, "\r\n");
+
+ return SDP_SUCCESS;
+}
+
+
+/*
+ * sdp_parse_attr_cap
+ * Parse an X-cap or cdsc capability attribute:
+ *   "<cap-num> <media> <transport> <payload list>"
+ * On success an sdp_mca_t (the same structure used for m= lines) is
+ * allocated and attached to the attribute, and cap_valid/last_cap_inst
+ * are updated so that subsequent X-cpar/cpar attributes can be
+ * associated with this capability.  AAL2 multi-profile transports are
+ * explicitly rejected.
+ */
+sdp_result_e sdp_parse_attr_cap (sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr)
+{
+ uint16_t i;
+ sdp_result_e result;
+ sdp_mca_t *cap_p;
+ char tmp[SDP_MAX_STRING_LEN];
+
+ /* Set the capability pointer to NULL for now in case we encounter
+ * an error in parsing.
+ */
+ attr_p->attr.cap_p = NULL;
+ /* Set the capability valid flag to FALSE in case we encounter an
+ * error. If we do, we don't want to process any X-cpar/cpar attributes
+ * from this point until we process the next valid X-cap/cdsc attr. */
+ sdp_p->cap_valid = FALSE;
+
+ /* Allocate resource for new capability. Note that the capability
+ * uses the same structure used for media lines.
+ */
+ cap_p = sdp_alloc_mca(sdp_p->parse_line);
+ if (cap_p == NULL) {
+ sdp_p->conf_p->num_no_resource++;
+ return (SDP_NO_RESOURCE);
+ }
+
+ /* Find the capability number. We don't need to store this since we
+ * calculate it for ourselves as we need to. But it must be specified. */
+ (void)sdp_getnextnumtok(ptr, &ptr, "/ \t", &result);
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: Capability not specified for %s, "
+ "unable to parse.", sdp_p->debug_str,
+ sdp_get_attr_name(attr_p->type));
+ SDP_FREE(cap_p);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ /* Find the media type. */
+ ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), " \t", &result);
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s No media type specified for %s attribute, "
+ "unable to parse.",
+ sdp_p->debug_str, sdp_get_attr_name(attr_p->type));
+ SDP_FREE(cap_p);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+ cap_p->media = SDP_MEDIA_UNSUPPORTED;
+ for (i=0; i < SDP_MAX_MEDIA_TYPES; i++) {
+ if (cpr_strncasecmp(tmp, sdp_media[i].name, sdp_media[i].strlen) == 0) {
+ cap_p->media = (sdp_media_e)i;
+ break;
+ }
+ }
+ if (cap_p->media == SDP_MEDIA_UNSUPPORTED) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: Media type unsupported (%s).",
+ sdp_p->debug_str, tmp);
+ SDP_FREE(cap_p);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ /* Find the transport protocol type. */
+ ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), " \t", &result);
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s No transport protocol type specified, "
+ "unable to parse.", sdp_p->debug_str);
+ SDP_FREE(cap_p);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+ cap_p->transport = SDP_TRANSPORT_UNSUPPORTED;
+ for (i=0; i < SDP_MAX_TRANSPORT_TYPES; i++) {
+ if (cpr_strncasecmp(tmp, sdp_transport[i].name,
+ sdp_transport[i].strlen) == 0) {
+ cap_p->transport = (sdp_transport_e)i;
+ break;
+ }
+ }
+ if (cap_p->transport == SDP_TRANSPORT_UNSUPPORTED) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: Transport protocol type unsupported (%s).",
+ sdp_p->debug_str, tmp);
+ SDP_FREE(cap_p);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ /* Find payload formats. AAL2 X-cap lines allow multiple
+ * transport/profile types per line, so these are handled differently.
+ */
+ if ((cap_p->transport == SDP_TRANSPORT_AAL2_ITU) ||
+ (cap_p->transport == SDP_TRANSPORT_AAL2_ATMF) ||
+ (cap_p->transport == SDP_TRANSPORT_AAL2_CUSTOM)) {
+ /* Capability processing is not currently defined for AAL2 types
+ * with multiple profiles. We don't process. */
+ sdp_parse_error(sdp_p,
+ "%s Warning: AAL2 profiles unsupported with "
+ "%s attributes.", sdp_p->debug_str,
+ sdp_get_attr_name(attr_p->type));
+ SDP_FREE(cap_p);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ } else {
+ /* Transport is a non-AAL2 type. Parse payloads normally. */
+ sdp_parse_payload_types(sdp_p, cap_p, ptr);
+ if (cap_p->num_payloads == 0) {
+ SDP_FREE(cap_p);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+ }
+
+ attr_p->attr.cap_p = cap_p;
+ /*
+ * This capability attr is valid. We can now handle X-cpar or
+ * cpar attrs.
+ */
+ sdp_p->cap_valid = TRUE;
+ sdp_p->last_cap_inst++;
+
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Parsed %s media type %s, Transport %s, "
+ "Num payloads %u", sdp_p->debug_str,
+ sdp_get_attr_name(attr_p->type),
+ sdp_get_media_name(cap_p->media),
+ sdp_get_transport_name(cap_p->transport),
+ cap_p->num_payloads);
+ }
+ return (SDP_SUCCESS);
+}
+
+/*
+ * sdp_build_attr_cap
+ * Serialize an X-cap/cdsc capability line and any attached X-cpar/cpar
+ * attributes.  Invalid capability data is logged and counted but still
+ * returns SDP_SUCCESS so the overall SDP build does not fail.
+ */
+sdp_result_e sdp_build_attr_cap (sdp_t *sdp_p, sdp_attr_t *attr_p,
+ flex_string *fs)
+{
+ uint16_t i, j;
+ sdp_mca_t *cap_p;
+ sdp_media_profiles_t *profile_p;
+
+ /* Get a pointer to the capability structure. */
+ cap_p = attr_p->attr.cap_p;
+
+ if (cap_p == NULL) {
+ CSFLogError(logTag, "%s Invalid %s attribute, unable to build.",
+ sdp_p->debug_str, sdp_get_attr_name(attr_p->type));
+ sdp_p->conf_p->num_invalid_param++;
+ /* Return success so build won't fail. */
+ return (SDP_SUCCESS);
+ }
+
+ /* Validate params for this capability line */
+ if ((cap_p->media >= SDP_MAX_MEDIA_TYPES) ||
+ (cap_p->transport >= SDP_MAX_TRANSPORT_TYPES)) {
+ /* Fix: CSFLogDebug takes a single tag before the format string;
+ * the tag was previously passed twice, shifting the format string
+ * into the vararg list and mismatching the arguments. */
+ CSFLogDebug(logTag, "%s Media or transport type invalid for %s "
+ "attribute, unable to build.", sdp_p->debug_str,
+ sdp_get_attr_name(attr_p->type));
+ sdp_p->conf_p->num_invalid_param++;
+ /* Return success so build won't fail. */
+ return (SDP_SUCCESS);
+ }
+
+ flex_string_sprintf(fs, "a=%s: %u %s ", sdp_attr[attr_p->type].name,
+ sdp_p->cur_cap_num, sdp_get_media_name(cap_p->media));
+
+ /* If the X-cap line has AAL2 profiles, build them differently. */
+ if ((cap_p->transport == SDP_TRANSPORT_AAL2_ITU) ||
+ (cap_p->transport == SDP_TRANSPORT_AAL2_ATMF) ||
+ (cap_p->transport == SDP_TRANSPORT_AAL2_CUSTOM)) {
+ profile_p = cap_p->media_profiles_p;
+ for (i=0; i < profile_p->num_profiles; i++) {
+ flex_string_sprintf(fs, "%s",
+ sdp_get_transport_name(profile_p->profile[i]));
+
+ for (j=0; j < profile_p->num_payloads[i]; j++) {
+ flex_string_sprintf(fs, " %u",
+ profile_p->payload_type[i][j]);
+ }
+ flex_string_append(fs, " ");
+ }
+
+ flex_string_append(fs, "\r\n");
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Built m= media line", sdp_p->debug_str);
+ }
+ return SDP_SUCCESS;
+ }
+
+ /* Build the transport name */
+ flex_string_sprintf(fs, "%s", sdp_get_transport_name(cap_p->transport));
+
+ /* Build the format lists */
+ for (i=0; i < cap_p->num_payloads; i++) {
+ if (cap_p->payload_indicator[i] == SDP_PAYLOAD_ENUM) {
+ flex_string_sprintf(fs, " %s",
+ sdp_get_payload_name((sdp_payload_e)cap_p->payload_type[i]));
+ } else {
+ flex_string_sprintf(fs, " %u", cap_p->payload_type[i]);
+ }
+ }
+
+ flex_string_append(fs, "\r\n");
+
+ /* Increment the current capability number for the next X-cap/cdsc attr. */
+ sdp_p->cur_cap_num += cap_p->num_payloads;
+ sdp_p->last_cap_type = attr_p->type;
+
+ /* Build any X-cpar/cpar attributes associated with this X-cap/cdsc line. */
+ return sdp_build_attr_cpar(sdp_p, cap_p->media_attrs_p, fs);
+}
+
+
+/*
+ * sdp_parse_attr_cpar
+ * Parse an X-cpar/cpar attribute.  Its value is itself a complete
+ * "a=<attr>[:<params>]" line, which is re-dispatched through the normal
+ * attribute parse function and appended to the media_attrs_p list of
+ * the most recently parsed X-cap/cdsc capability.  Mixing the X- and
+ * non-X syntaxes is rejected, as is nesting capability attributes
+ * inside a cpar.
+ */
+sdp_result_e sdp_parse_attr_cpar (sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr)
+{
+ uint16_t i;
+ sdp_result_e result;
+ sdp_mca_t *cap_p;
+ sdp_attr_t *cap_attr_p = NULL;
+ sdp_attr_t *prev_attr_p;
+ char tmp[SDP_MAX_STRING_LEN];
+
+ /* Make sure we've processed a valid X-cap/cdsc attr prior to this and
+ * if so, get the cap pointer. */
+ if (sdp_p->cap_valid == TRUE) {
+ sdp_attr_e cap_type;
+
+ if (attr_p->type == SDP_ATTR_CPAR) {
+ cap_type = SDP_ATTR_CDSC;
+ } else {
+ /* Default to X-CAP for everything else */
+ cap_type = SDP_ATTR_X_CAP;
+ }
+
+ /* Session-level capabilities live at SDP_SESSION_LEVEL; otherwise
+ * look at the current media line. */
+ if (sdp_p->mca_count == 0) {
+ cap_attr_p = sdp_find_attr(sdp_p, SDP_SESSION_LEVEL, 0,
+ cap_type, sdp_p->last_cap_inst);
+ } else {
+ cap_attr_p = sdp_find_attr(sdp_p, sdp_p->mca_count, 0,
+ cap_type, sdp_p->last_cap_inst);
+ }
+ }
+ if ((cap_attr_p == NULL) || (cap_attr_p->attr.cap_p == NULL)) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: %s attribute specified with no "
+ "prior %s attribute", sdp_p->debug_str,
+ sdp_get_attr_name(attr_p->type),
+ (attr_p->type == SDP_ATTR_CPAR)?
+ (sdp_get_attr_name(SDP_ATTR_CDSC)) :
+ (sdp_get_attr_name(SDP_ATTR_X_CAP)) );
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ /*
+ * Ensure there is no mixed syntax like CDSC followed by X-CPAR
+ * or X-CAP followed by CPAR.
+ */
+ if (((cap_attr_p->type == SDP_ATTR_CDSC) &&
+ (attr_p->type == SDP_ATTR_X_CPAR)) ||
+ ( (cap_attr_p->type == SDP_ATTR_X_CAP) &&
+ (attr_p->type == SDP_ATTR_CPAR)) ) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: %s attribute inconsistent with "
+ "prior %s attribute", sdp_p->debug_str,
+ sdp_get_attr_name(attr_p->type),
+ sdp_get_attr_name(cap_attr_p->type));
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+ cap_p = cap_attr_p->attr.cap_p;
+
+ /* a= is the only token we handle in an X-cpar/cpar attribute. */
+ ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), "= \t", &result);
+
+ if ((result != SDP_SUCCESS) || (tmp[0] != 'a') || (tmp[1] != '\0')) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: Invalid token type (%s) in %s "
+ "attribute, unable to parse", sdp_p->debug_str, tmp,
+ sdp_get_attr_name(attr_p->type));
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+ /*sa_ignore NO_NULL_CHK
+ *{ptr is valid since the pointer was checked earlier and the
+ * function would have exited if NULL.}
+ */
+ if (*ptr == '=') {
+ ptr++;
+ }
+
+ /* Find the attribute type. */
+ ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), ": \t", &result);
+ /*sa_ignore NO_NULL_CHK
+ *{ptr is valid since the pointer was checked earlier and the
+ * function would have exited if NULL.}
+ */
+ if (ptr[0] == ':') {
+ /* Skip the ':' char for parsing attribute parameters. */
+ ptr++;
+ }
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s No attribute type specified for %s attribute, unable to parse.",
+ sdp_p->debug_str,
+ sdp_get_attr_name(attr_p->type));
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ /* Reset the type of the attribute from X-cpar/cpar to whatever the
+ * specified type is. */
+ attr_p->type = SDP_ATTR_INVALID;
+ attr_p->next_p = NULL;
+ for (i=0; i < SDP_MAX_ATTR_TYPES; i++) {
+ if (cpr_strncasecmp(tmp, sdp_attr[i].name, sdp_attr[i].strlen) == 0) {
+ attr_p->type = (sdp_attr_e)i;
+ }
+ }
+ if (attr_p->type == SDP_ATTR_INVALID) {
+ /* NOTE(review): attr_p->type is SDP_ATTR_INVALID at this point, so
+ * sdp_get_attr_name() below names the invalid sentinel rather than
+ * the original cpar attribute — verify whether that is intended. */
+ sdp_parse_error(sdp_p,
+ "%s Warning: Unrecognized attribute (%s) for %s attribute, unable to parse.",
+ sdp_p->debug_str, tmp,
+ sdp_get_attr_name(attr_p->type));
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ /* We don't allow recursion with the capability attributes. */
+ if ((attr_p->type == SDP_ATTR_X_SQN) ||
+ (attr_p->type == SDP_ATTR_X_CAP) ||
+ (attr_p->type == SDP_ATTR_X_CPAR) ||
+ (attr_p->type == SDP_ATTR_SQN) ||
+ (attr_p->type == SDP_ATTR_CDSC) ||
+ (attr_p->type == SDP_ATTR_CPAR)) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: Invalid attribute (%s) for %s"
+ " attribute, unable to parse.", sdp_p->debug_str, tmp,
+ sdp_get_attr_name(attr_p->type));
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ /* Parse the attribute with its own type-specific parse function. */
+ result = sdp_attr[attr_p->type].parse_func(sdp_p, attr_p, ptr);
+ if (result != SDP_SUCCESS) {
+ return (result);
+ }
+
+ /* Hook the attribute into the capability structure (append to the
+ * tail of the singly-linked media_attrs_p list). */
+ if (cap_p->media_attrs_p == NULL) {
+ cap_p->media_attrs_p = attr_p;
+ } else {
+ for (prev_attr_p = cap_p->media_attrs_p;
+ prev_attr_p->next_p != NULL;
+ prev_attr_p = prev_attr_p->next_p) {
+ ; /* Empty for */
+ }
+ prev_attr_p->next_p = attr_p;
+ }
+
+ return (SDP_SUCCESS);
+}
+
+/*
+ * sdp_build_attr_cpar
+ * Serialize the attribute list of a capability as X-cpar or cpar lines.
+ * "cpar" is used when the owning capability was a cdsc attribute,
+ * X-cpar otherwise (the backward compatible form).  Unknown attribute
+ * types are skipped with a debug log; always returns SDP_SUCCESS.
+ */
+sdp_result_e sdp_build_attr_cpar (sdp_t *sdp_p, sdp_attr_t *attr_p,
+ flex_string *fs)
+{
+ sdp_result_e result;
+ const char *cpar_name;
+
+ /* Determine whether to use cpar or X-cpar */
+ if (sdp_p->last_cap_type == SDP_ATTR_CDSC) {
+ cpar_name = sdp_get_attr_name(SDP_ATTR_CPAR);
+ } else {
+ /*
+ * Default to X-CPAR if anything else. This is the backward
+ * compatible value.
+ */
+ cpar_name = sdp_get_attr_name(SDP_ATTR_X_CPAR);
+ }
+
+ /* Each listed attribute becomes its own "a=<cpar>: a=..." line, built
+ * by the attribute's own build function. */
+ while (attr_p != NULL) {
+ if (attr_p->type >= SDP_MAX_ATTR_TYPES) {
+ CSFLogDebug(logTag, "%s Invalid attribute type to build (%u)",
+ sdp_p->debug_str, (unsigned)attr_p->type);
+ } else {
+ flex_string_sprintf(fs, "a=%s: ", cpar_name);
+
+ result = sdp_attr[attr_p->type].build_func(sdp_p, attr_p, fs);
+
+ if (result == SDP_SUCCESS) {
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Built %s a=%s attribute line",
+ sdp_p->debug_str, cpar_name,
+ sdp_get_attr_name(attr_p->type));
+ }
+ }
+ }
+ attr_p = attr_p->next_p;
+ }
+
+ return (SDP_SUCCESS);
+}
+
+/*
+ * sdp_parse_attr_rtcp
+ * Parse an a=rtcp attribute: "<port> [<nettype> <addrtype> <addr>]".
+ * The connection-address part is optional but all-or-nothing: once a
+ * token follows the port, nettype, addrtype and addr must all parse.
+ */
+sdp_result_e sdp_parse_attr_rtcp (sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr)
+{
+ sdp_result_e result;
+ char nettype[SDP_MAX_STRING_LEN];
+ sdp_rtcp_t *rtcp_p = &(attr_p->attr.rtcp);
+ int enum_raw;
+
+ memset(rtcp_p, 0, sizeof(sdp_rtcp_t));
+
+ rtcp_p->port = (uint16_t)sdp_getnextnumtok(ptr, &ptr, " \t", &result);
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: could not parse port for rtcp attribute",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+
+ return SDP_INVALID_PARAMETER;
+ }
+
+ /* The rest is optional, although it is all-or-nothing */
+ /* Peek for a token after the port; only the result code is used and
+ * ptr is not advanced, so find_token_enum below re-reads from ptr. */
+ (void)sdp_getnextstrtok(ptr, nettype, sizeof(nettype), " \t", &result);
+ if (result == SDP_EMPTY_TOKEN) {
+ /* Nothing after the port */
+ return SDP_SUCCESS;
+ }
+
+ enum_raw = find_token_enum("Nettype", sdp_p, &ptr, sdp_nettype,
+ SDP_MAX_NETWORK_TYPES, SDP_NT_UNSUPPORTED);
+ if (enum_raw == -1) {
+ return SDP_INVALID_PARAMETER;
+ }
+ rtcp_p->nettype = (sdp_nettype_e)enum_raw;
+
+ enum_raw = find_token_enum("Addrtype", sdp_p, &ptr, sdp_addrtype,
+ SDP_MAX_ADDR_TYPES, SDP_AT_UNSUPPORTED);
+ if (enum_raw == -1) {
+ return SDP_INVALID_PARAMETER;
+ }
+ rtcp_p->addrtype = (sdp_addrtype_e)enum_raw;
+
+ ptr = sdp_getnextstrtok(ptr, rtcp_p->addr, sizeof(rtcp_p->addr), " \t",
+ &result);
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: could not parse addr for rtcp attribute",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+
+ return SDP_INVALID_PARAMETER;
+ }
+
+ return SDP_SUCCESS;
+}
+
+/*
+ * sdp_build_attr_rtcp
+ *
+ * Deliberately unimplemented: serializing a=rtcp is not supported, so
+ * this always reports SDP_FAILURE.
+ */
+sdp_result_e sdp_build_attr_rtcp (sdp_t *sdp_p, sdp_attr_t *attr_p,
+                                  flex_string *fs)
+{
+  /* We should not be serializing SDP anyway, but we need this function until
+   * Bug 1112737 is resolved. */
+  return SDP_FAILURE;
+}
+
+/*
+ * sdp_parse_attr_rtr
+ *
+ * Parses "a=X-cisco-rtr[:confirm]".  The optional token must be exactly
+ * "confirm"; its absence is not an error (confirm defaults to FALSE),
+ * but any other token is rejected.
+ */
+sdp_result_e sdp_parse_attr_rtr (sdp_t *sdp_p, sdp_attr_t *attr_p,
+                           const char *ptr)
+{
+    sdp_result_e result;
+    char tmp[SDP_MAX_STRING_LEN];
+
+    if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+        SDP_PRINT("%s Parsing a=%s", sdp_p->debug_str,
+                     sdp_get_attr_name(attr_p->type));
+    }
+    /*Default confirm to FALSE. */
+    attr_p->attr.rtr.confirm = FALSE;
+
+    ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), " \t", &result);
+    if (result != SDP_SUCCESS){ // No confirm tag specified is not an error
+        return (SDP_SUCCESS);
+    } else {
+       /* See if confirm was specified.  Defaults to FALSE. */
+       if (cpr_strncasecmp(tmp, "confirm", sizeof("confirm")) == 0) {
+           attr_p->attr.rtr.confirm = TRUE;
+       }
+       /* A token was present but it was not "confirm" -> invalid. */
+       if (attr_p->attr.rtr.confirm == FALSE) {
+          sdp_parse_error(sdp_p,
+              "%s Warning: RTR confirm parameter invalid (%s)",
+              sdp_p->debug_str, tmp);
+           sdp_p->conf_p->num_invalid_param++;
+           return (SDP_INVALID_PARAMETER);
+       }
+       if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+           SDP_PRINT("%s Parsed a=%s, %s", sdp_p->debug_str,
+                     sdp_get_attr_name(attr_p->type),
+                     tmp);
+       }
+       return (SDP_SUCCESS);
+    }
+}
+
+/*
+ * sdp_build_attr_rtr
+ *
+ * Serializes "a=<name>" with an optional ":confirm" suffix when the
+ * confirm flag is set.
+ */
+sdp_result_e sdp_build_attr_rtr (sdp_t *sdp_p, sdp_attr_t *attr_p,
+                           flex_string *fs)
+{
+  const char *suffix = attr_p->attr.rtr.confirm ? ":confirm" : "";
+
+  flex_string_sprintf(fs, "a=%s%s\r\n", sdp_attr[attr_p->type].name, suffix);
+
+  return SDP_SUCCESS;
+}
+
+/*
+ * sdp_parse_attr_comediadir
+ *
+ * Parses "a=direction:<role> [<nettype> <addrtype> <conn-addr> [<src-port>]]".
+ * A passive role carries no further parameters; for other roles the
+ * connection information is parsed if present.  Unsupported nettype /
+ * addrtype values are recorded as warnings (num_invalid_param) rather
+ * than causing an immediate return.
+ */
+sdp_result_e sdp_parse_attr_comediadir (sdp_t *sdp_p, sdp_attr_t *attr_p,
+                                        const char *ptr)
+{
+    int i;
+    sdp_result_e result;
+    tinybool type_found = FALSE;
+    char tmp[SDP_MAX_STRING_LEN];
+
+    attr_p->attr.comediadir.role = SDP_MEDIADIR_ROLE_PASSIVE;
+    attr_p->attr.comediadir.conn_info_present = FALSE;
+    attr_p->attr.comediadir.conn_info.nettype = SDP_NT_INVALID;
+    attr_p->attr.comediadir.src_port = 0;
+
+    /* Find the media direction role. */
+    ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), ": \t", &result);
+
+    if (result != SDP_SUCCESS) {
+        sdp_parse_error(sdp_p,
+            "%s Warning: No role parameter specified for "
+            "comediadir attribute.", sdp_p->debug_str);
+        sdp_p->conf_p->num_invalid_param++;
+        return (SDP_INVALID_PARAMETER);
+    }
+    attr_p->attr.comediadir.role = SDP_MEDIADIR_ROLE_UNSUPPORTED;
+    for (i=0; i < SDP_MAX_MEDIADIR_ROLES; i++) {
+        if (cpr_strncasecmp(tmp, sdp_mediadir_role[i].name,
+                            sdp_mediadir_role[i].strlen) == 0) {
+            type_found = TRUE;
+            attr_p->attr.comediadir.role = (sdp_mediadir_role_e)i;
+            break;
+        }
+    }
+    if (attr_p->attr.comediadir.role == SDP_MEDIADIR_ROLE_UNSUPPORTED) {
+        sdp_parse_error(sdp_p,
+            "%s Warning: Invalid role type specified for "
+            "comediadir attribute (%s).", sdp_p->debug_str, tmp);
+        sdp_p->conf_p->num_invalid_param++;
+        return (SDP_INVALID_PARAMETER);
+    }
+
+    /* If the role is passive, we don't expect any more params. */
+    if (attr_p->attr.comediadir.role == SDP_MEDIADIR_ROLE_PASSIVE) {
+        if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+            SDP_PRINT("%s Parsed a=%s, passive",
+                      sdp_p->debug_str, sdp_get_attr_name(attr_p->type));
+        }
+        return (SDP_SUCCESS);
+    }
+
+    /* Find the connection information if present */
+    /* parse to get the nettype */
+    ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), " \t", &result);
+    if (result != SDP_SUCCESS) {
+        sdp_parse_error(sdp_p,
+            "%s Warning: No network type specified in comediadir "
+            "attribute.", sdp_p->debug_str);
+        sdp_p->conf_p->num_invalid_param++;
+        return (SDP_SUCCESS); /* as the optional parameters are not there */
+    }
+    attr_p->attr.comediadir.conn_info.nettype = SDP_NT_UNSUPPORTED;
+    /* Accept the nettype only when it matches a supported entry; the
+     * type_found flag is reset after each candidate so the scan can
+     * continue over the full table. */
+    for (i=0; i < SDP_MAX_NETWORK_TYPES; i++) {
+        if (cpr_strncasecmp(tmp, sdp_nettype[i].name,
+                            sdp_nettype[i].strlen) == 0) {
+            type_found = TRUE;
+        }
+        if (type_found == TRUE) {
+            if (sdp_p->conf_p->nettype_supported[i] == TRUE) {
+                attr_p->attr.comediadir.conn_info.nettype = (sdp_nettype_e)i;
+            }
+            type_found = FALSE;
+        }
+    }
+    if (attr_p->attr.comediadir.conn_info.nettype == SDP_NT_UNSUPPORTED) {
+        sdp_parse_error(sdp_p,
+            "%s Warning: ConnInfo in Comediadir: network type "
+            "unsupported (%s).", sdp_p->debug_str, tmp);
+        sdp_p->conf_p->num_invalid_param++;
+    }
+
+    /* Find the comedia address type. */
+    ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), " \t", &result);
+    if (result != SDP_SUCCESS) {
+        sdp_parse_error(sdp_p,
+            "%s Warning: No address type specified in comediadir"
+            " attribute.", sdp_p->debug_str);
+        sdp_p->conf_p->num_invalid_param++;
+        /* NOTE(review): no return here, so on failure the addrtype scan
+         * below runs against the previous token in tmp — confirm this
+         * fallthrough is intended. */
+    }
+    attr_p->attr.comediadir.conn_info.addrtype = SDP_AT_UNSUPPORTED;
+    for (i=0; i < SDP_MAX_ADDR_TYPES; i++) {
+        if (cpr_strncasecmp(tmp, sdp_addrtype[i].name,
+                            sdp_addrtype[i].strlen) == 0) {
+            type_found = TRUE;
+        }
+        if (type_found == TRUE) {
+            if (sdp_p->conf_p->addrtype_supported[i] == TRUE) {
+                attr_p->attr.comediadir.conn_info.addrtype = (sdp_addrtype_e)i;
+            }
+            type_found = FALSE;
+        }
+    }
+    if (attr_p->attr.comediadir.conn_info.addrtype == SDP_AT_UNSUPPORTED) {
+        sdp_parse_error(sdp_p,
+            "%s Warning: Conninfo address type unsupported "
+            "(%s).", sdp_p->debug_str, tmp);
+        sdp_p->conf_p->num_invalid_param++;
+    }
+
+    /* Find the conninfo address. */
+    ptr = sdp_getnextstrtok(ptr, attr_p->attr.comediadir.conn_info.conn_addr,
+                            sizeof(attr_p->attr.comediadir.conn_info.conn_addr), " \t", &result);
+    if (result != SDP_SUCCESS) {
+        sdp_parse_error(sdp_p,
+            "%s Warning: No conninfo address specified in "
+            "comediadir attribute.", sdp_p->debug_str);
+        sdp_p->conf_p->num_invalid_param++;
+    }
+
+    /* Find the src port info , if any */
+    attr_p->attr.comediadir.src_port  = sdp_getnextnumtok(ptr, &ptr, " \t",
+                                                          &result);
+    if (result != SDP_SUCCESS) {
+        sdp_parse_error(sdp_p,
+            "%s Warning: No src port specified in "
+            "comediadir attribute.", sdp_p->debug_str);
+        sdp_p->conf_p->num_invalid_param++;
+    }
+
+    if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+        SDP_PRINT("%s Parsed a=%s, network %s, addr type %s, address %s "
+                  "srcport %u ",
+                  sdp_p->debug_str, sdp_get_attr_name(attr_p->type),
+                  sdp_get_network_name(attr_p->attr.comediadir.conn_info.nettype),
+                  sdp_get_address_name(attr_p->attr.comediadir.conn_info.addrtype),
+                  attr_p->attr.comediadir.conn_info.conn_addr,
+                  (unsigned int)attr_p->attr.comediadir.src_port);
+    }
+
+    /* NOTE(review): num_invalid_param is a running counter on conf_p, so
+     * this check also trips when errors were recorded before this
+     * attribute — confirm that is the intended behavior. */
+    if (sdp_p->conf_p->num_invalid_param > 0) {
+        return (SDP_INVALID_PARAMETER);
+    }
+    return (SDP_SUCCESS);
+}
+
+/*
+ * sdp_build_attr_comediadir
+ *
+ * Serializes "a=<name>:<role>" for the media-direction attribute.
+ */
+sdp_result_e
+sdp_build_attr_comediadir (sdp_t *sdp_p, sdp_attr_t *attr_p,
+                           flex_string *fs)
+{
+  const char *attr_name = sdp_attr[attr_p->type].name;
+  const char *role_name =
+      sdp_get_mediadir_role_name(attr_p->attr.comediadir.role);
+
+  flex_string_sprintf(fs, "a=%s:%s\r\n", attr_name, role_name);
+
+  return SDP_SUCCESS;
+}
+
+/*
+ * sdp_parse_attr_silencesupp
+ *
+ * Parses "a=silenceSupp:<on|off|-> <timer|-> <pref> <sidUse> <fxnslevel|->".
+ * All five fields are mandatory; "-" is accepted for the numeric fields
+ * (recorded via the corresponding *_null flag) and as an alias for "off"
+ * in the enable field.
+ */
+sdp_result_e sdp_parse_attr_silencesupp (sdp_t *sdp_p, sdp_attr_t *attr_p,
+                                         const char *ptr)
+{
+    int i;
+    sdp_result_e result;
+    char tmp[SDP_MAX_STRING_LEN];
+
+    /* Find silenceSuppEnable */
+    ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), " \t", &result);
+
+    if (result != SDP_SUCCESS) {
+        sdp_parse_error(sdp_p,
+            "%s No silenceSupp enable value specified, parse failed.",
+            sdp_p->debug_str);
+        sdp_p->conf_p->num_invalid_param++;
+        return (SDP_INVALID_PARAMETER);
+    }
+
+    if (cpr_strncasecmp(tmp, "on", sizeof("on")) == 0) {
+        attr_p->attr.silencesupp.enabled = TRUE;
+    } else if (cpr_strncasecmp(tmp, "off", sizeof("off")) == 0) {
+        attr_p->attr.silencesupp.enabled = FALSE;
+    } else if (cpr_strncasecmp(tmp, "-", sizeof("-")) == 0) {
+        attr_p->attr.silencesupp.enabled = FALSE;
+    } else {
+        sdp_parse_error(sdp_p,
+            "%s Warning: silenceSuppEnable parameter invalid (%s)",
+            sdp_p->debug_str, tmp);
+        sdp_p->conf_p->num_invalid_param++;
+        return (SDP_INVALID_PARAMETER);
+    }
+
+    /* Find silenceTimer -- uint16_t or "-" */
+
+    attr_p->attr.silencesupp.timer =
+        (uint16_t)sdp_getnextnumtok_or_null(ptr, &ptr, " \t",
+                                       &attr_p->attr.silencesupp.timer_null,
+                                       &result);
+    if (result != SDP_SUCCESS) {
+        sdp_parse_error(sdp_p,
+            "%s Warning: Invalid timer value specified for "
+            "silenceSupp attribute.", sdp_p->debug_str);
+        sdp_p->conf_p->num_invalid_param++;
+        return (SDP_INVALID_PARAMETER);
+    }
+
+    /* Find suppPref */
+    ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), " \t", &result);
+    if (result != SDP_SUCCESS) {
+        sdp_parse_error(sdp_p,
+            "%s Warning: No silenceSupp pref specified.",
+            sdp_p->debug_str);
+        sdp_p->conf_p->num_invalid_param++;
+        return (SDP_INVALID_PARAMETER);
+    }
+    attr_p->attr.silencesupp.pref = SDP_SILENCESUPP_PREF_UNKNOWN;
+    for (i=0; i < SDP_MAX_SILENCESUPP_PREF; i++) {
+        if (cpr_strncasecmp(tmp, sdp_silencesupp_pref[i].name,
+                            sdp_silencesupp_pref[i].strlen) == 0) {
+            attr_p->attr.silencesupp.pref = (sdp_silencesupp_pref_e)i;
+        }
+    }
+    if (attr_p->attr.silencesupp.pref == SDP_SILENCESUPP_PREF_UNKNOWN) {
+        sdp_parse_error(sdp_p,
+            "%s Warning: silenceSupp pref unrecognized (%s)",
+            sdp_p->debug_str, tmp);
+        sdp_p->conf_p->num_invalid_param++;
+        return (SDP_INVALID_PARAMETER);
+    }
+
+    /* Find sidUse */
+    ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), " \t", &result);
+    if (result != SDP_SUCCESS) {
+        sdp_parse_error(sdp_p,
+            "%s Warning: No silenceSupp sidUse specified.",
+            sdp_p->debug_str);
+        sdp_p->conf_p->num_invalid_param++;
+        return (SDP_INVALID_PARAMETER);
+    }
+    attr_p->attr.silencesupp.siduse = SDP_SILENCESUPP_SIDUSE_UNKNOWN;
+    for (i=0; i < SDP_MAX_SILENCESUPP_SIDUSE; i++) {
+        if (cpr_strncasecmp(tmp, sdp_silencesupp_siduse[i].name,
+                            sdp_silencesupp_siduse[i].strlen) == 0) {
+            attr_p->attr.silencesupp.siduse = (sdp_silencesupp_siduse_e)i;
+        }
+    }
+    if (attr_p->attr.silencesupp.siduse == SDP_SILENCESUPP_SIDUSE_UNKNOWN) {
+        sdp_parse_error(sdp_p,
+            "%s Warning: silenceSupp sidUse unrecognized (%s)",
+            sdp_p->debug_str, tmp);
+        sdp_p->conf_p->num_invalid_param++;
+        return (SDP_INVALID_PARAMETER);
+    }
+
+    /* Find fxnslevel -- uint8_t or "-" */
+    attr_p->attr.silencesupp.fxnslevel =
+        (uint8_t)sdp_getnextnumtok_or_null(ptr, &ptr, " \t",
+                                      &attr_p->attr.silencesupp.fxnslevel_null,
+                                      &result);
+
+    if (result != SDP_SUCCESS) {
+        sdp_parse_error(sdp_p,
+            "%s Warning: Invalid fxnslevel value specified for "
+            "silenceSupp attribute.", sdp_p->debug_str);
+        sdp_p->conf_p->num_invalid_param++;
+        return (SDP_INVALID_PARAMETER);
+    }
+
+    if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+        SDP_PRINT("%s Parsed a=%s, enabled %s",
+                  sdp_p->debug_str, sdp_get_attr_name(attr_p->type),
+                  (attr_p->attr.silencesupp.enabled ? "on" : "off"));
+        if (attr_p->attr.silencesupp.timer_null) {
+            SDP_PRINT(" timer=-");
+        } else {
+            SDP_PRINT(" timer=%u,", attr_p->attr.silencesupp.timer);
+        }
+        SDP_PRINT(" pref=%s, siduse=%s,",
+                  sdp_get_silencesupp_pref_name(attr_p->attr.silencesupp.pref),
+                  sdp_get_silencesupp_siduse_name(
+                                             attr_p->attr.silencesupp.siduse));
+        if (attr_p->attr.silencesupp.fxnslevel_null) {
+            SDP_PRINT(" fxnslevel=-");
+        } else {
+            SDP_PRINT(" fxnslevel=%u,", attr_p->attr.silencesupp.fxnslevel);
+        }
+    }
+
+    return (SDP_SUCCESS);
+}
+
+/* Render a silenceSupp numeric field into buf: "-" when the null flag
+ * is set, otherwise the decimal value. */
+static void sdp_silencesupp_numtok (char *buf, size_t buf_len,
+                                    tinybool is_null, unsigned value)
+{
+    if (is_null) {
+        snprintf(buf, buf_len, "-");
+    } else {
+        snprintf(buf, buf_len, "%u", value);
+    }
+}
+
+/*
+ * sdp_build_attr_silencesupp
+ *
+ * Serializes "a=<name>:<on|off> <timer|-> <pref> <sidUse> <fxnslevel|->".
+ */
+sdp_result_e sdp_build_attr_silencesupp (sdp_t *sdp_p, sdp_attr_t *attr_p,
+                                         flex_string *fs)
+{
+    char timer_str[11];
+    char fxnslevel_str[11];
+
+    sdp_silencesupp_numtok(timer_str, sizeof(timer_str),
+                           attr_p->attr.silencesupp.timer_null,
+                           attr_p->attr.silencesupp.timer);
+    sdp_silencesupp_numtok(fxnslevel_str, sizeof(fxnslevel_str),
+                           attr_p->attr.silencesupp.fxnslevel_null,
+                           attr_p->attr.silencesupp.fxnslevel);
+
+    flex_string_sprintf(fs, "a=%s:%s %s %s %s %s\r\n",
+        sdp_attr[attr_p->type].name,
+        (attr_p->attr.silencesupp.enabled ? "on" : "off"),
+        timer_str,
+        sdp_get_silencesupp_pref_name(attr_p->attr.silencesupp.pref),
+        sdp_get_silencesupp_siduse_name(attr_p->attr.silencesupp.siduse),
+        fxnslevel_str);
+
+    return SDP_SUCCESS;
+}
+
+/*
+ * sdp_parse_context_crypto_suite
+ *
+ * Matches the crypto suite pointed to by str against the known suite
+ * table and, on a match, stores the suite value plus its master key and
+ * salt sizes into attr_p's srtp_context.  Returns TRUE on a match,
+ * FALSE (after logging a parse error) otherwise.
+ */
+tinybool sdp_parse_context_crypto_suite(char * str, sdp_attr_t *attr_p, sdp_t *sdp_p) {
+    /*
+     * Three crypto_suites are defined: (Notice no SPACE between "crypto:" and the <crypto-suite>
+     * AES_CM_128_HMAC_SHA1_80
+     * AES_CM_128_HMAC_SHA1_32
+     * F8_128_HMAC_SHA1_80
+     */
+
+     int i;
+
+     /* Check crypto suites */
+     for(i=0; i<SDP_SRTP_MAX_NUM_CRYPTO_SUITES; i++) {
+       if (!cpr_strcasecmp(sdp_srtp_crypto_suite_array[i].crypto_suite_str, str)) {
+           attr_p->attr.srtp_context.suite = sdp_srtp_crypto_suite_array[i].crypto_suite_val;
+           attr_p->attr.srtp_context.master_key_size_bytes =
+               sdp_srtp_crypto_suite_array[i].key_size_bytes;
+           attr_p->attr.srtp_context.master_salt_size_bytes =
+               sdp_srtp_crypto_suite_array[i].salt_size_bytes;
+           return TRUE; /* There is a successful match so exit */
+       }
+     }
+     /* couldn't find a matching crypto suite */
+     sdp_parse_error(sdp_p,
+         "%s No Matching crypto suite for SRTP Context(%s)-'X-crypto:v1' expected",
+         sdp_p->debug_str, str);
+
+     return FALSE;
+}
+
+
+/*
+ * sdp_build_attr_srtpcontext
+ *
+ * Serializes "a=<name>:<crypto-suite> inline:<base64(key|salt)>||".
+ * The master key and salt are concatenated, base64-encoded, and
+ * NUL-terminated before being written to fs.  Returns
+ * SDP_INVALID_PARAMETER if the base64 encoding fails.
+ */
+sdp_result_e sdp_build_attr_srtpcontext (sdp_t *sdp_p, sdp_attr_t *attr_p,
+                                     flex_string *fs)
+{
+#define MAX_BASE64_ENCODE_SIZE_BYTES 60
+    int output_len = MAX_BASE64_ENCODE_SIZE_BYTES;
+    int key_size = attr_p->attr.srtp_context.master_key_size_bytes;
+    int salt_size = attr_p->attr.srtp_context.master_salt_size_bytes;
+    /* +1 so the terminating NUL written after encoding cannot run past
+     * the buffer when the encoder produces MAX_BASE64_ENCODE_SIZE_BYTES
+     * output bytes (output_len may equal the encode limit). */
+    unsigned char base64_encoded_data[MAX_BASE64_ENCODE_SIZE_BYTES + 1];
+    unsigned char base64_encoded_input[MAX_BASE64_ENCODE_SIZE_BYTES];
+    base64_result_t status;
+
+    /* Append master and salt keys */
+    memcpy(base64_encoded_input,
+           attr_p->attr.srtp_context.master_key,
+           key_size );
+    memcpy(base64_encoded_input + key_size,
+           attr_p->attr.srtp_context.master_salt,
+           salt_size );
+
+    if ((status = base64_encode(base64_encoded_input, key_size + salt_size,
+                      base64_encoded_data, &output_len)) != BASE64_SUCCESS) {
+        if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+            CSFLogError(logTag, "%s Error: Failure to Base64 Encoded data (%s) ",
+                     sdp_p->debug_str, BASE64_RESULT_TO_STRING(status));
+        }
+        return (SDP_INVALID_PARAMETER);
+    }
+
+    /* base64_encode does not NUL-terminate; do it here so the buffer can
+     * be printed as a C string. */
+    *(base64_encoded_data + output_len) = '\0';
+
+    flex_string_sprintf(fs, "a=%s:%s inline:%s||\r\n",
+      sdp_attr[attr_p->type].name,
+      sdp_srtp_context_crypto_suite[attr_p->attr.srtp_context.suite].name,
+      base64_encoded_data);
+
+    return SDP_SUCCESS;
+}
+
+/*
+ * sdp_parse_attr_mptime
+ * This function parses the a=mptime sdp line. This parameter consists of
+ * one or more numbers or hyphens ("-"). The first parameter must be a
+ * number. The number of parameters must match the number of formats specified
+ * on the m= line. This function is liberal in that it does not match against
+ * the m= line or require a number for the first parameter.
+ */
+sdp_result_e sdp_parse_attr_mptime (
+    sdp_t *sdp_p,
+    sdp_attr_t *attr_p,
+    const char *ptr)
+{
+    uint16_t i;                        /* loop counter for parameters */
+    sdp_result_e result;               /* value returned by this function */
+    tinybool null_ind;                 /* true if a parameter is "-" */
+
+    /*
+     * Scan the input line up to the maximum number of parameters supported.
+     * Look for numbers or hyphens and store the resulting values. Hyphens
+     * are stored as zeros.
+     */
+    /* NOTE(review): num_intervals is only incremented here, never reset —
+     * assumes the attribute struct was zero-initialized by the caller. */
+    for (i=0; i<SDP_MAX_PAYLOAD_TYPES; i++) {
+        attr_p->attr.mptime.intervals[i] =
+            (ushort)sdp_getnextnumtok_or_null(ptr,&ptr," \t",&null_ind,&result);
+        if (result != SDP_SUCCESS) {
+            break;
+        }
+        attr_p->attr.mptime.num_intervals++;
+    }
+
+    /*
+     * At least one parameter must be supplied. If not, return an error
+     * and optionally log the failure.
+     */
+    if (attr_p->attr.mptime.num_intervals == 0) {
+        sdp_parse_error(sdp_p,
+            "%s Warning: No intervals specified for %s attr.",
+            sdp_p->debug_str, sdp_attr[attr_p->type].name);
+        sdp_p->conf_p->num_invalid_param++;
+        return (SDP_INVALID_PARAMETER);
+    }
+
+    /*
+     * Here is some debugging code that helps us track what data
+     * is received and parsed.
+     */
+    if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+        SDP_PRINT("%s Parsed a=%s, num intervals %u, intervals: ",
+                  sdp_p->debug_str, sdp_get_attr_name(attr_p->type),
+                  attr_p->attr.mptime.num_intervals);
+        for (i=0; i < attr_p->attr.mptime.num_intervals; i++) {
+            SDP_PRINT("%u ", attr_p->attr.mptime.intervals[i]);
+        }
+    }
+
+    return SDP_SUCCESS;
+}
+
+/*
+ * sdp_build_attr_mptime
+ * This function builds the a=mptime sdp line. It reads the selected attribute
+ * from the sdp structure. Parameters with a value of zero are replaced by
+ * hyphens.
+ */
+sdp_result_e sdp_build_attr_mptime (
+    sdp_t *sdp_p,
+    sdp_attr_t *attr_p,
+    flex_string *fs)
+{
+    int idx;
+
+    flex_string_sprintf(fs, "a=%s:", sdp_attr[attr_p->type].name);
+
+    /* Space-separated interval list; a stored zero renders as "-". */
+    for (idx = 0; idx < attr_p->attr.mptime.num_intervals; idx++) {
+        unsigned interval = attr_p->attr.mptime.intervals[idx];
+
+        if (idx > 0) {
+            flex_string_append(fs, " ");
+        }
+
+        if (interval == 0) {
+            flex_string_append(fs, "-");
+        } else {
+            flex_string_sprintf(fs, "%u", interval);
+        }
+    }
+
+    flex_string_append(fs, "\r\n");
+
+    return SDP_SUCCESS;
+}
+
+
+
+/*
+ * sdp_parse_attr_x_sidin
+ *
+ * Parses "a=X-sidin:<stream-id>" and stores the value in
+ * attr_p->attr.stream_data.x_sidin.  A missing value is an error.
+ */
+sdp_result_e sdp_parse_attr_x_sidin (sdp_t *sdp_p, sdp_attr_t *attr_p,
+                                     const char *ptr)
+{
+    sdp_result_e  result;
+    attr_p->attr.stream_data.x_sidin[0]  = '\0';
+
+    if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+        SDP_PRINT("%s Parsing a=%s", sdp_p->debug_str,
+                  sdp_get_attr_name(attr_p->type));
+    }
+
+    /* Find the X-sidin value */
+    ptr = sdp_getnextstrtok(ptr, attr_p->attr.stream_data.x_sidin,
+                            sizeof(attr_p->attr.stream_data.x_sidin), " \t", &result);
+    if (result != SDP_SUCCESS) {
+        sdp_parse_error(sdp_p,
+            "%s Warning: No Stream Id incoming specified for X-sidin attribute.",
+            sdp_p->debug_str);
+        sdp_p->conf_p->num_invalid_param++;
+        return (SDP_INVALID_PARAMETER);
+    }
+
+    if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+        SDP_PRINT("%s Parsed a=%s, %s", sdp_p->debug_str,
+                  sdp_get_attr_name(attr_p->type),
+                  attr_p->attr.stream_data.x_sidin);
+    }
+    return (SDP_SUCCESS);
+}
+
+/*
+ * sdp_build_attr_x_sidin
+ *
+ * Serializes "a=<name>:<stream-id-in>".
+ */
+sdp_result_e sdp_build_attr_x_sidin (sdp_t *sdp_p, sdp_attr_t *attr_p,
+                                     flex_string *fs)
+{
+  const char *name = sdp_attr[attr_p->type].name;
+
+  flex_string_sprintf(fs, "a=%s:%s\r\n", name,
+                      attr_p->attr.stream_data.x_sidin);
+
+  return SDP_SUCCESS;
+}
+
+/*
+ * sdp_parse_attr_x_sidout
+ *
+ * Parses "a=X-sidout:<stream-id>" and stores the value in
+ * attr_p->attr.stream_data.x_sidout.  A missing value is an error.
+ */
+sdp_result_e sdp_parse_attr_x_sidout (sdp_t *sdp_p, sdp_attr_t *attr_p,
+                                      const char *ptr)
+{
+    sdp_result_e  result;
+    attr_p->attr.stream_data.x_sidout[0]  = '\0';
+
+    if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+        SDP_PRINT("%s Parsing a=%s", sdp_p->debug_str,
+                  sdp_get_attr_name(attr_p->type));
+    }
+
+    /* Find the X-sidout value */
+    ptr = sdp_getnextstrtok(ptr, attr_p->attr.stream_data.x_sidout,
+                            sizeof(attr_p->attr.stream_data.x_sidout), " \t", &result);
+    if (result != SDP_SUCCESS) {
+        sdp_parse_error(sdp_p,
+            "%s Warning: No Stream Id outgoing specified for X-sidout attribute.",
+            sdp_p->debug_str);
+        sdp_p->conf_p->num_invalid_param++;
+        return (SDP_INVALID_PARAMETER);
+    }
+
+    if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+        SDP_PRINT("%s Parsed a=%s, %s", sdp_p->debug_str,
+                  sdp_get_attr_name(attr_p->type),
+                  attr_p->attr.stream_data.x_sidout);
+    }
+    return (SDP_SUCCESS);
+}
+
+/*
+ * sdp_build_attr_x_sidout
+ *
+ * Serializes "a=<name>:<stream-id-out>".
+ */
+sdp_result_e sdp_build_attr_x_sidout (sdp_t *sdp_p, sdp_attr_t *attr_p,
+                                      flex_string *fs)
+{
+  const char *name = sdp_attr[attr_p->type].name;
+
+  flex_string_sprintf(fs, "a=%s:%s\r\n", name,
+                      attr_p->attr.stream_data.x_sidout);
+
+  return SDP_SUCCESS;
+}
+
+
+/*
+ * sdp_parse_attr_x_confid
+ *
+ * Parses "a=X-confid:<conf-id>" and stores the value in
+ * attr_p->attr.stream_data.x_confid.  A missing value is an error.
+ */
+sdp_result_e sdp_parse_attr_x_confid (sdp_t *sdp_p, sdp_attr_t *attr_p,
+                                      const char *ptr)
+{
+    sdp_result_e  result;
+    attr_p->attr.stream_data.x_confid[0] = '\0';
+
+    if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+        SDP_PRINT("%s Parsing a=%s", sdp_p->debug_str,
+                  sdp_get_attr_name(attr_p->type));
+    }
+
+    /* Find the X-confid value */
+    ptr = sdp_getnextstrtok(ptr, attr_p->attr.stream_data.x_confid,
+                            sizeof(attr_p->attr.stream_data.x_confid), " \t", &result);
+    if (result != SDP_SUCCESS) {
+        sdp_parse_error(sdp_p,
+            "%s Warning: No Conf Id incoming specified for "
+            "X-confid attribute.", sdp_p->debug_str);
+        sdp_p->conf_p->num_invalid_param++;
+        return (SDP_INVALID_PARAMETER);
+    }
+
+    if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+        SDP_PRINT("%s Parsed a=%s, %s", sdp_p->debug_str,
+                  sdp_get_attr_name(attr_p->type),
+                  attr_p->attr.stream_data.x_confid);
+    }
+    return (SDP_SUCCESS);
+}
+
+/*
+ * sdp_build_attr_x_confid
+ *
+ * Serializes "a=<name>:<conf-id>".  Refuses to build the line (and
+ * returns SDP_INVALID_PARAMETER) when the conference id was never set.
+ */
+sdp_result_e sdp_build_attr_x_confid (sdp_t *sdp_p, sdp_attr_t *attr_p,
+                                      flex_string *fs)
+{
+  /* strlen() returns an unsigned size_t, so "<= 0" can only ever mean
+   * equality with zero; test emptiness explicitly. */
+  if (strlen(attr_p->attr.stream_data.x_confid) == 0) {
+    if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+      SDP_PRINT("%s X-confid value is not set. Cannot build a=X-confid line\n",
+                sdp_p->debug_str);
+    }
+
+    return SDP_INVALID_PARAMETER;
+  }
+
+  flex_string_sprintf(fs, "a=%s:%s\r\n",
+    sdp_attr[attr_p->type].name,
+    attr_p->attr.stream_data.x_confid);
+
+  return SDP_SUCCESS;
+}
+
+/*
+ * sdp_parse_attr_group
+ *
+ * Parses "a=group:<semantics> <id1> <id2> ...".  The semantics token is
+ * matched against sdp_group_attr_val (an unsupported value is only a
+ * warning), then up to SDP_MAX_MEDIA_STREAMS ids are duplicated into
+ * group_ids[] with num_group_id counting them.
+ */
+sdp_result_e sdp_parse_attr_group (sdp_t *sdp_p, sdp_attr_t *attr_p,
+                                   const char *ptr)
+{
+    sdp_result_e result;
+    char tmp[64];
+    int i=0;
+
+    if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+        SDP_PRINT("%s Parsing a=%s", sdp_p->debug_str,
+                  sdp_get_attr_name(attr_p->type));
+    }
+
+    /* Find the a=group:<attrib> <id1> < id2> ... values */
+    ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), " \t", &result);
+    if (result != SDP_SUCCESS) {
+        sdp_parse_error(sdp_p,
+            "%s Warning: No group attribute value specified for "
+            "a=group line", sdp_p->debug_str);
+        sdp_p->conf_p->num_invalid_param++;
+        return (SDP_INVALID_PARAMETER);
+    }
+
+    attr_p->attr.stream_data.group_attr = SDP_GROUP_ATTR_UNSUPPORTED;
+    for (i=0; i < SDP_MAX_GROUP_ATTR_VAL; i++) {
+        if (cpr_strncasecmp(tmp, sdp_group_attr_val[i].name,
+                            sdp_group_attr_val[i].strlen) == 0) {
+            attr_p->attr.stream_data.group_attr = (sdp_group_attr_e)i;
+            break;
+        }
+    }
+
+    /* Unsupported semantics are logged but parsing continues. */
+    if (attr_p->attr.stream_data.group_attr == SDP_GROUP_ATTR_UNSUPPORTED) {
+        sdp_parse_error(sdp_p,
+            "%s Warning: Group attribute type unsupported (%s).",
+            sdp_p->debug_str, tmp);
+    }
+
+
+    /*
+     * Scan the input line up after group:<attr>  to the maximum number
+     * of id available.
+     */
+    attr_p->attr.stream_data.num_group_id =0;
+
+    for (i=0; i<SDP_MAX_MEDIA_STREAMS; i++) {
+        ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), " \t", &result);
+
+        if (result != SDP_SUCCESS) {
+            break;
+        }
+        attr_p->attr.stream_data.group_ids[i] = cpr_strdup(tmp);
+        /* NOTE(review): an allocation failure silently truncates the id
+         * list instead of reporting an error — confirm intended. */
+        if (!attr_p->attr.stream_data.group_ids[i]) {
+            break;
+        }
+
+        attr_p->attr.stream_data.num_group_id++;
+    }
+
+    if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+        SDP_PRINT("%s Parsed a=%s:%s\n", sdp_p->debug_str,
+                  sdp_get_attr_name(attr_p->type),
+                  sdp_get_group_attr_name (attr_p->attr.stream_data.group_attr));
+        for (i=0; i < attr_p->attr.stream_data.num_group_id; i++) {
+            SDP_PRINT("%s Parsed group line id : %s\n", sdp_p->debug_str,
+                      attr_p->attr.stream_data.group_ids[i]);
+        }
+    }
+    return (SDP_SUCCESS);
+}
+
+/*
+ * sdp_build_attr_group
+ *
+ * Serializes "a=<name>:<semantics>" followed by each stored (non-NULL)
+ * group id, space separated.
+ */
+sdp_result_e sdp_build_attr_group (sdp_t *sdp_p, sdp_attr_t *attr_p,
+                                   flex_string *fs)
+{
+    int idx;
+
+    flex_string_sprintf(fs, "a=%s:%s",
+        sdp_attr[attr_p->type].name,
+        sdp_get_group_attr_name(attr_p->attr.stream_data.group_attr));
+
+    for (idx = 0; idx < attr_p->attr.stream_data.num_group_id; idx++) {
+        const char *group_id = attr_p->attr.stream_data.group_ids[idx];
+
+        if (group_id) {
+            flex_string_sprintf(fs, " %s", group_id);
+        }
+    }
+
+    flex_string_append(fs, "\r\n");
+
+    return SDP_SUCCESS;
+}
+
+/* Parse the source-filter attribute
+ * "a=source-filter:<filter-mode><filter-spec>"
+ * <filter-spec> = <nettype><addrtype><dest-addr><src_addr><src_addr>...
+ * A "*" address type is accepted and mapped to SDP_AT_FQDN; at least one
+ * source address must be present.
+ */
+sdp_result_e sdp_parse_attr_source_filter (sdp_t *sdp_p, sdp_attr_t *attr_p,
+                                           const char *ptr)
+{
+    int i;
+    sdp_result_e result;
+    char tmp[SDP_MAX_STRING_LEN];
+
+    attr_p->attr.source_filter.mode = SDP_FILTER_MODE_NOT_PRESENT;
+    attr_p->attr.source_filter.nettype = SDP_NT_UNSUPPORTED;
+    attr_p->attr.source_filter.addrtype = SDP_AT_UNSUPPORTED;
+    attr_p->attr.source_filter.dest_addr[0] = '\0';
+    attr_p->attr.source_filter.num_src_addr = 0;
+
+    /* Find the filter mode */
+    ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), " \t", &result);
+    if (result != SDP_SUCCESS) {
+        sdp_parse_error(sdp_p,
+            "%s Warning: No src filter attribute value specified for "
+            "a=source-filter line", sdp_p->debug_str);
+        sdp_p->conf_p->num_invalid_param++;
+        return (SDP_INVALID_PARAMETER);
+    }
+    for (i = 0; i < SDP_MAX_FILTER_MODE; i++) {
+        if (cpr_strncasecmp(tmp, sdp_src_filter_mode_val[i].name,
+                            sdp_src_filter_mode_val[i].strlen) == 0) {
+            attr_p->attr.source_filter.mode = (sdp_src_filter_mode_e)i;
+            break;
+        }
+    }
+    if (attr_p->attr.source_filter.mode == SDP_FILTER_MODE_NOT_PRESENT) {
+        /* No point continuing */
+        sdp_parse_error(sdp_p,
+            "%s Warning: Invalid src filter mode for a=source-filter "
+            "line", sdp_p->debug_str);
+        sdp_p->conf_p->num_invalid_param++;
+        return (SDP_INVALID_PARAMETER);
+    }
+
+    /* Find the network type */
+    ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), " \t", &result);
+    if (result != SDP_SUCCESS) {
+        sdp_p->conf_p->num_invalid_param++;
+        return (SDP_INVALID_PARAMETER);
+    }
+    /* Only nettypes enabled in the configuration are accepted. */
+    for (i = 0; i < SDP_MAX_NETWORK_TYPES; i++) {
+        if (cpr_strncasecmp(tmp, sdp_nettype[i].name,
+                            sdp_nettype[i].strlen) == 0) {
+            if (sdp_p->conf_p->nettype_supported[i] == TRUE) {
+                attr_p->attr.source_filter.nettype = (sdp_nettype_e)i;
+            }
+        }
+    }
+    if (attr_p->attr.source_filter.nettype == SDP_NT_UNSUPPORTED) {
+        sdp_parse_error(sdp_p,
+            "%s Warning: Network type unsupported "
+            "(%s) for a=source-filter", sdp_p->debug_str, tmp);
+        sdp_p->conf_p->num_invalid_param++;
+        return (SDP_INVALID_PARAMETER);
+    }
+
+    /* Find the address type */
+    ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), " \t", &result);
+    if (result != SDP_SUCCESS) {
+        sdp_p->conf_p->num_invalid_param++;
+        return (SDP_INVALID_PARAMETER);
+    }
+    for (i = 0; i < SDP_MAX_ADDR_TYPES; i++) {
+        if (cpr_strncasecmp(tmp, sdp_addrtype[i].name,
+                            sdp_addrtype[i].strlen) == 0) {
+            if (sdp_p->conf_p->addrtype_supported[i] == TRUE) {
+                attr_p->attr.source_filter.addrtype = (sdp_addrtype_e)i;
+            }
+        }
+    }
+    if (attr_p->attr.source_filter.addrtype == SDP_AT_UNSUPPORTED) {
+        /* A wildcard "*" address type is treated as FQDN. */
+        if (strncmp(tmp, "*", 1) == 0) {
+            attr_p->attr.source_filter.addrtype = SDP_AT_FQDN;
+        } else {
+            sdp_parse_error(sdp_p,
+                "%s Warning: Address type unsupported "
+                "(%s) for a=source-filter", sdp_p->debug_str, tmp);
+            sdp_p->conf_p->num_invalid_param++;
+            return (SDP_INVALID_PARAMETER);
+        }
+    }
+
+    /* Find the destination addr */
+    ptr = sdp_getnextstrtok(ptr, attr_p->attr.source_filter.dest_addr,
+                            sizeof(attr_p->attr.source_filter.dest_addr), " \t", &result);
+    if (result != SDP_SUCCESS) {
+        sdp_parse_error(sdp_p,
+            "%s No filter destination address specified for "
+            "a=source-filter", sdp_p->debug_str);
+        sdp_p->conf_p->num_invalid_param++;
+        return (SDP_INVALID_PARAMETER);
+    }
+
+    /* Find the list of source address to apply the filter */
+    for (i = 0; i < SDP_MAX_SRC_ADDR_LIST; i++) {
+        ptr = sdp_getnextstrtok(ptr, attr_p->attr.source_filter.src_list[i],
+                                sizeof(attr_p->attr.source_filter.src_list[i]), " \t", &result);
+        if (result != SDP_SUCCESS) {
+            break;
+        }
+        attr_p->attr.source_filter.num_src_addr++;
+    }
+    if (attr_p->attr.source_filter.num_src_addr == 0) {
+        sdp_parse_error(sdp_p,
+            "%s Warning: No source list provided "
+            "for a=source-filter", sdp_p->debug_str);
+        sdp_p->conf_p->num_invalid_param++;
+        return (SDP_INVALID_PARAMETER);
+    }
+
+    return (SDP_SUCCESS);
+}
+
+/*
+ * sdp_build_source_filter
+ *
+ * Serializes "a=<name>:<mode> <nettype> <addrtype> <dest-addr>" followed
+ * by each stored source address, space separated.
+ */
+sdp_result_e sdp_build_source_filter (sdp_t *sdp_p, sdp_attr_t *attr_p,
+                                      flex_string *fs)
+{
+    int idx;
+
+    flex_string_sprintf(fs, "a=%s:%s %s %s %s",
+        sdp_get_attr_name(attr_p->type),
+        sdp_get_src_filter_mode_name(attr_p->attr.source_filter.mode),
+        sdp_get_network_name(attr_p->attr.source_filter.nettype),
+        sdp_get_address_name(attr_p->attr.source_filter.addrtype),
+        attr_p->attr.source_filter.dest_addr);
+
+    for (idx = 0; idx < attr_p->attr.source_filter.num_src_addr; idx++) {
+        flex_string_sprintf(fs, " %s",
+                            attr_p->attr.source_filter.src_list[idx]);
+    }
+
+    flex_string_append(fs, "\r\n");
+
+    return SDP_SUCCESS;
+}
+
+/* Parse the rtcp-unicast attribute
+ * "a=rtcp-unicast:<reflection|rsi>"
+ * The mode token is matched against sdp_rtcp_unicast_mode_val and stored
+ * as an index in attr_p->attr.u32_val; missing or unknown modes fail.
+ */
+sdp_result_e sdp_parse_attr_rtcp_unicast (sdp_t *sdp_p, sdp_attr_t *attr_p,
+                                          const char *ptr)
+{
+    sdp_result_e result;
+    uint32_t i;
+    char tmp[SDP_MAX_STRING_LEN];
+
+    attr_p->attr.u32_val = SDP_RTCP_UNICAST_MODE_NOT_PRESENT;
+
+    memset(tmp, 0, sizeof(tmp));
+
+    ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), " \t", &result);
+    if (result != SDP_SUCCESS) {
+        sdp_parse_error(sdp_p,
+            "%s Warning: No rtcp unicast mode specified for "
+            "a=rtcp-unicast line", sdp_p->debug_str);
+        sdp_p->conf_p->num_invalid_param++;
+        return (SDP_INVALID_PARAMETER);
+    }
+    for (i = 0; i < SDP_RTCP_MAX_UNICAST_MODE;  i++) {
+        if (cpr_strncasecmp(tmp, sdp_rtcp_unicast_mode_val[i].name,
+                            sdp_rtcp_unicast_mode_val[i].strlen) == 0) {
+            attr_p->attr.u32_val = i;
+            break;
+        }
+    }
+    if (attr_p->attr.u32_val == SDP_RTCP_UNICAST_MODE_NOT_PRESENT) {
+        sdp_parse_error(sdp_p,
+            "%s Warning: Invalid rtcp unicast mode for "
+            "a=rtcp-unicast line", sdp_p->debug_str);
+        sdp_p->conf_p->num_invalid_param++;
+        return (SDP_INVALID_PARAMETER);
+    }
+    return (SDP_SUCCESS);
+}
+
+/*
+ * sdp_build_attr_rtcp_unicast
+ *
+ * Serializes "a=<name>:<mode>"; refuses an out-of-range mode value.
+ */
+sdp_result_e sdp_build_attr_rtcp_unicast (sdp_t *sdp_p, sdp_attr_t *attr_p,
+                                          flex_string *fs)
+{
+    uint32_t mode = attr_p->attr.u32_val;
+
+    if (mode >= SDP_RTCP_MAX_UNICAST_MODE) {
+        return SDP_INVALID_PARAMETER;
+    }
+
+    flex_string_sprintf(fs, "a=%s:%s\r\n",
+        sdp_get_attr_name(attr_p->type),
+        sdp_get_rtcp_unicast_mode_name((sdp_rtcp_unicast_mode_e)mode));
+
+    return SDP_SUCCESS;
+}
+
+
+/*
+ * store_sdescriptions_mki_or_lifetime
+ *
+ * Verifies the syntax of the MKI or lifetime parameter and stores
+ * it in the sdescriptions attribute struct.  A token containing ":" is
+ * treated as an MKI ("value:length"); anything else is treated as a
+ * lifetime.
+ *
+ * Inputs:
+ *   buf    - pointer to MKI or lifetime string assumes string is null
+ *            terminated.
+ *   attr_p - pointer to attribute struct
+ *
+ * Outputs:
+ *   Return TRUE all is good otherwise FALSE for error.
+ */
+
+tinybool
+store_sdescriptions_mki_or_lifetime (char *buf, sdp_attr_t *attr_p)
+{
+
+    tinybool  result;
+    uint16_t       mkiLen;
+    char      mkiValue[SDP_SRTP_MAX_MKI_SIZE_BYTES];
+
+    /* MKI has a colon */
+    if (strstr(buf, ":")) {
+        result = verify_sdescriptions_mki(buf, mkiValue, &mkiLen);
+        if (result) {
+            attr_p->attr.srtp_context.mki_size_bytes = mkiLen;
+            sstrncpy((char*)attr_p->attr.srtp_context.mki, mkiValue,
+                     SDP_SRTP_MAX_MKI_SIZE_BYTES);
+        }
+
+    } else {
+        result = verify_sdescriptions_lifetime(buf);
+        if (result) {
+            sstrncpy((char*)attr_p->attr.srtp_context.master_key_lifetime, buf,
+                     SDP_SRTP_MAX_LIFETIME_BYTES);
+        }
+    }
+
+    return result;
+
+}
+
+/*
+ * sdp_parse_sdescriptions_key_param
+ *
+ * This routine parses the srtp key-params pointed to by str.
+ *
+ * key-params    = <key-method> ":" <key-info>
+ * key-method    = "inline" / key-method-ext [note V9 only supports 'inline']
+ * key-info      = srtp-key-info
+ * srtp-key-info = key-salt ["|" lifetime] ["|" mki]
+ * key-salt      = 1*(base64)   ; binary key and salt values
+ *                              ; concatenated together, and then
+ *                              ; base64 encoded [section 6.8 of
+ *                              ; RFC2046]
+ *
+ * lifetime      = ["2^"] 1*(DIGIT)
+ * mki           = mki-value ":" mki-length
+ * mki-value     = 1*DIGIT
+ * mki-length    = 1*3DIGIT   ; range 1..128.
+ *
+ * Inputs: str - pointer to beginning of key-params and assumes
+ *               null terminated string.
+ *
+ * Returns TRUE when a valid base64 key|salt (of exactly the size implied
+ * by the previously-parsed crypto suite) and any optional lifetime/MKI
+ * parameters were stored in attr_p; FALSE otherwise.
+ */
+
+
+tinybool
+sdp_parse_sdescriptions_key_param (const char *str, sdp_attr_t *attr_p,
+                                   sdp_t *sdp_p)
+{
+    char            buf[SDP_MAX_STRING_LEN],
+                    base64decodeData[SDP_MAX_STRING_LEN];
+    const char      *ptr;
+    sdp_result_e    result = SDP_SUCCESS;
+    tinybool        keyFound = FALSE;
+    int             len,
+                    keySize,
+                    saltSize;
+    base64_result_t status;
+
+    ptr = str;
+    if (cpr_strncasecmp(ptr, "inline:", 7) != 0) {
+        sdp_parse_error(sdp_p,
+            "%s Could not find keyword inline", sdp_p->debug_str);
+        sdp_p->conf_p->num_invalid_param++;
+        return FALSE;
+    }
+
+    /* advance pass the inline key word */
+    ptr = ptr + 7;
+    ptr = sdp_getnextstrtok(ptr, buf, sizeof(buf), "|", &result);
+    while (result == SDP_SUCCESS) {
+        /* the fist time this loop executes, the key is gotten */
+        if (keyFound == FALSE) {
+            keyFound = TRUE;
+            len = SDP_MAX_STRING_LEN;
+            /* The key is base64 encoded composed of the master key concatenated with the
+             * master salt.
+             */
+            status = base64_decode((unsigned char *)buf, strlen(buf),
+                                   (unsigned char *)base64decodeData, &len);
+
+        if (status != BASE64_SUCCESS) {
+            sdp_parse_error(sdp_p,
+                "%s key-salt error decoding buffer: %s",
+                sdp_p->debug_str, BASE64_RESULT_TO_STRING(status));
+            return FALSE;
+        }
+
+        /* Key and salt sizes come from the crypto suite parsed earlier
+         * into attr_p->attr.srtp_context; the decoded length must match
+         * exactly. */
+        keySize = attr_p->attr.srtp_context.master_key_size_bytes;
+        saltSize = attr_p->attr.srtp_context.master_salt_size_bytes;
+
+        if (len != keySize + saltSize) {
+            sdp_parse_error(sdp_p,
+                "%s key-salt size doesn't match: (%d, %d, %d)",
+                sdp_p->debug_str, len, keySize, saltSize);
+            return(FALSE);
+        }
+
+            memcpy(attr_p->attr.srtp_context.master_key,
+                   base64decodeData,
+                   keySize);
+
+            memcpy(attr_p->attr.srtp_context.master_salt,
+                   base64decodeData + keySize,
+                   saltSize);
+
+            /* Used only for MGCP */
+            SDP_SRTP_CONTEXT_SET_MASTER_KEY
+                     (attr_p->attr.srtp_context.selection_flags);
+            SDP_SRTP_CONTEXT_SET_MASTER_SALT
+                     (attr_p->attr.srtp_context.selection_flags);
+
+       } else if (store_sdescriptions_mki_or_lifetime(buf, attr_p) == FALSE) {
+           return FALSE;
+       }
+
+       /* if we haven't reached the end of line, get the next token */
+       ptr = sdp_getnextstrtok(ptr, buf, sizeof(buf), "|", &result);
+    }
+
+    /* if we didn't find the key, error out */
+    if (keyFound == FALSE) {
+        sdp_parse_error(sdp_p,
+            "%s Could not find sdescriptions key", sdp_p->debug_str);
+        sdp_p->conf_p->num_invalid_param++;
+        return FALSE;
+    }
+
+    return TRUE;
+
+}
+
+/*
+ * sdp_build_attr_sdescriptions
+ *
+ * Builds a=crypto line for attribute type SDP_ATTR_SDESCRIPTIONS.
+ *
+ * a=crypto:tag 1*WSP crypto-suite 1*WSP key-params
+ *
+ * Where key-params = inline: <key|salt> ["|"lifetime] ["|" MKI:length]
+ * The key and salt is base64 encoded and lifetime and MKI/length are optional.
+ */
+
+sdp_result_e
+sdp_build_attr_sdescriptions (sdp_t *sdp_p, sdp_attr_t *attr_p,
+ flex_string *fs)
+{
+
+ unsigned char base64_encoded_data[MAX_BASE64_STRING_LEN];
+ unsigned char base64_encoded_input[MAX_BASE64_STRING_LEN];
+ int keySize,
+ saltSize,
+ outputLen;
+ base64_result_t status;
+
+ keySize = attr_p->attr.srtp_context.master_key_size_bytes;
+ saltSize = attr_p->attr.srtp_context.master_salt_size_bytes;
+
+ /* concatenate the master key + salt then base64 encode it */
+ memcpy(base64_encoded_input,
+ attr_p->attr.srtp_context.master_key,
+ keySize);
+
+ memcpy(base64_encoded_input + keySize,
+ attr_p->attr.srtp_context.master_salt,
+ saltSize);
+
+ /* outputLen is in/out: buffer size on entry, encoded length on exit */
+ outputLen = MAX_BASE64_STRING_LEN;
+ status = base64_encode(base64_encoded_input, keySize + saltSize,
+ base64_encoded_data, &outputLen);
+
+ if (status != BASE64_SUCCESS) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s Error: Failure to Base64 Encoded data (%s) ",
+ sdp_p->debug_str, BASE64_RESULT_TO_STRING(status));
+ }
+ return (SDP_INVALID_PARAMETER);
+
+ }
+
+ /* NOTE(review): assumes outputLen < MAX_BASE64_STRING_LEN so the
+ * terminating NUL fits -- confirm base64_encode guarantees this. */
+ base64_encoded_data[outputLen] = 0;
+
+ /* lifetime and MKI parameters are optional. Only include them if
+ * they were set.
+ */
+
+
+ if (attr_p->attr.srtp_context.master_key_lifetime[0] != 0 &&
+ attr_p->attr.srtp_context.mki[0] != 0) {
+ flex_string_sprintf(fs, "a=%s:%d %s inline:%s|%s|%s:%d\r\n",
+ sdp_attr[attr_p->type].name,
+ attr_p->attr.srtp_context.tag,
+ sdp_srtp_context_crypto_suite[attr_p->attr.srtp_context.suite].name,
+ base64_encoded_data,
+ attr_p->attr.srtp_context.master_key_lifetime,
+ attr_p->attr.srtp_context.mki,
+ attr_p->attr.srtp_context.mki_size_bytes);
+
+ return SDP_SUCCESS;
+ }
+
+ /* if we get here, either lifetime is populated and mki and is not or mki is populated
+ * and lifetime is not or neither is populated
+ */
+
+ if (attr_p->attr.srtp_context.master_key_lifetime[0] != 0) {
+ flex_string_sprintf(fs, "a=%s:%d %s inline:%s|%s\r\n",
+ sdp_attr[attr_p->type].name,
+ attr_p->attr.srtp_context.tag,
+ sdp_srtp_context_crypto_suite[attr_p->attr.srtp_context.suite].name,
+ base64_encoded_data,
+ attr_p->attr.srtp_context.master_key_lifetime);
+
+ } else if (attr_p->attr.srtp_context.mki[0] != 0) {
+ flex_string_sprintf(fs, "a=%s:%d %s inline:%s|%s:%d\r\n",
+ sdp_attr[attr_p->type].name,
+ attr_p->attr.srtp_context.tag,
+ sdp_srtp_context_crypto_suite[attr_p->attr.srtp_context.suite].name,
+ base64_encoded_data,
+ attr_p->attr.srtp_context.mki,
+ attr_p->attr.srtp_context.mki_size_bytes);
+
+ } else {
+ flex_string_sprintf(fs, "a=%s:%d %s inline:%s\r\n",
+ sdp_attr[attr_p->type].name,
+ attr_p->attr.srtp_context.tag,
+ sdp_srtp_context_crypto_suite[attr_p->attr.srtp_context.suite].name,
+ base64_encoded_data);
+
+ }
+
+ return SDP_SUCCESS;
+
+}
+
+
+/*
+ * sdp_parse_attr_srtp
+ *
+ * Parses Session Description for Protocol Security Descriptions
+ * version 2 or version 9. Grammar is of the form:
+ *
+ * a=crypto:<tag> <crypto-suite> <key-params> [<session-params>]
+ *
+ * Note session-params is not supported and will not be parsed.
+ * Version 2 does not contain a tag.
+ *
+ * Inputs:
+ * sdp_p - pointer to sdp handle
+ * attr_p - pointer to attribute structure
+ * ptr - pointer to string to be parsed
+ * vtype - version type
+ */
+
+sdp_result_e
+sdp_parse_attr_srtp (sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr, sdp_attr_e vtype)
+{
+
+ char tmp[SDP_MAX_STRING_LEN];
+ sdp_result_e result = SDP_FAILURE;
+ int k = 0;
+
+ /* initialize only the optional parameters */
+ attr_p->attr.srtp_context.master_key_lifetime[0] = 0;
+ attr_p->attr.srtp_context.mki[0] = 0;
+
+ /* used only for MGCP */
+ SDP_SRTP_CONTEXT_SET_ENCRYPT_AUTHENTICATE
+ (attr_p->attr.srtp_context.selection_flags);
+
+ /* get the tag only if we are version 9 */
+ if (vtype == SDP_ATTR_SDESCRIPTIONS) {
+ attr_p->attr.srtp_context.tag =
+ sdp_getnextnumtok(ptr, &ptr, " \t", &result);
+
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s Could not find sdescriptions tag",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+
+ }
+ }
+
+ /* get the crypto suite */
+ ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), " \t", &result);
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s Could not find sdescriptions crypto suite", sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ /* also records expected master key/salt sizes for the suite */
+ if (!sdp_parse_context_crypto_suite(tmp, attr_p, sdp_p)) {
+ sdp_parse_error(sdp_p,
+ "%s Unsupported crypto suite", sdp_p->debug_str);
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), " \t", &result);
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s Could not find sdescriptions key params", sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ if (!sdp_parse_sdescriptions_key_param(tmp, attr_p, sdp_p)) {
+ sdp_parse_error(sdp_p,
+ "%s Failed to parse key-params", sdp_p->debug_str);
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ /* if there are session parameters, scan the session parameters
+ * into tmp until we reach end of line. Currently the sdp parser
+ * does not parse session parameters but if they are present,
+ * we store them for the application.
+ */
+ /*sa_ignore NO_NULL_CHK
+ *{ptr is valid since the pointer was checked earlier and the
+ * function would have exited if NULL.}
+ */
+ while (*ptr && *ptr != '\n' && *ptr != '\r' && k < SDP_MAX_STRING_LEN) {
+ tmp[k++] = *ptr++;
+ }
+
+ /* NOTE(review): if the session parameters are exactly
+ * SDP_MAX_STRING_LEN chars or longer, k hits the limit and they are
+ * silently dropped (no truncation, no error) -- confirm intended. */
+ if ((k) && (k < SDP_MAX_STRING_LEN)) {
+ tmp[k] = 0;
+ attr_p->attr.srtp_context.session_parameters = cpr_strdup(tmp);
+ }
+
+ return SDP_SUCCESS;
+
+}
+
+/* Parses crypto attribute based on the sdescriptions version
+ * 9 grammar.
+ *
+ */
+
+sdp_result_e
+sdp_parse_attr_sdescriptions (sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr)
+{
+
+ /* Delegate to the common v2/v9 parser, requesting the v9 form
+ * (which includes the leading tag). */
+ return sdp_parse_attr_srtp(sdp_p, attr_p, ptr,
+ SDP_ATTR_SDESCRIPTIONS);
+
+}
+
+/* Parses X-crypto attribute based on the sdescriptions version
+ * 2 grammar.
+ *
+ */
+
+sdp_result_e sdp_parse_attr_srtpcontext (sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr)
+{
+
+ /* Delegate to the common v2/v9 parser, requesting the v2 form
+ * (X-crypto, which carries no tag). */
+ return sdp_parse_attr_srtp(sdp_p, attr_p, ptr,
+ SDP_ATTR_SRTP_CONTEXT);
+}
+
+
+/* Builds "a=<attr-name>:<stored value>" for an ICE attribute. */
+sdp_result_e sdp_build_attr_ice_attr (sdp_t *sdp_p, sdp_attr_t *attr_p,
+ flex_string *fs) {
+ flex_string_sprintf(fs, "a=%s:%s\r\n",
+ sdp_get_attr_name(attr_p->type),
+ attr_p->attr.ice_attr);
+
+ return SDP_SUCCESS;
+}
+
+
+/* Stores the remainder of an ICE attribute line (up to CR/LF) verbatim
+ * in attr_p->attr.ice_attr; no further parsing is done here. */
+sdp_result_e sdp_parse_attr_ice_attr (sdp_t *sdp_p, sdp_attr_t *attr_p, const char *ptr) {
+ sdp_result_e result;
+ char tmp[SDP_MAX_STRING_LEN];
+
+ /* "\r\n" delimiters: grab the whole rest of the line as one token */
+ ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), "\r\n", &result);
+ if (result != SDP_SUCCESS){
+ sdp_parse_error(sdp_p,
+ "%s Warning: problem parsing ice attribute ", sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ snprintf(attr_p->attr.ice_attr, sizeof(attr_p->attr.ice_attr), "%s", tmp);
+
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Parsed a=%s, %s", sdp_p->debug_str, sdp_get_attr_name(attr_p->type), tmp);
+ }
+ return (SDP_SUCCESS);
+}
+
+
+/* Builds a bare "a=<attr-name>" flag line (attribute with no value). */
+sdp_result_e sdp_build_attr_simple_flag (sdp_t *sdp_p, sdp_attr_t *attr_p,
+ flex_string *fs) {
+ flex_string_sprintf(fs, "a=%s\r\n", sdp_get_attr_name(attr_p->type));
+
+ return SDP_SUCCESS;
+}
+
+
+/* Parses a valueless flag attribute: presence of the a= line is the
+ * entire payload, so this only emits an optional trace. */
+sdp_result_e sdp_parse_attr_simple_flag (sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr) {
+ /* No parameters to parse. */
+
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Parsed a=%s", sdp_p->debug_str,
+ sdp_get_attr_name(attr_p->type));
+ }
+
+ return (SDP_SUCCESS);
+}
+
+/* Shared helper: copies the rest of the attribute line (up to CR/LF)
+ * into buf. Used by both the fixed-size and heap-allocated variants. */
+static sdp_result_e sdp_parse_attr_line(sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr, char *buf, size_t buf_len) {
+ sdp_result_e result;
+
+ /* return pointer deliberately ignored; only the copied token matters */
+ (void)sdp_getnextstrtok(ptr, buf, buf_len, "\r\n", &result);
+
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: No string token found for %s attribute",
+ sdp_p->debug_str, sdp_get_attr_name(attr_p->type));
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ } else {
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Parsed a=%s, %s", sdp_p->debug_str,
+ sdp_get_attr_name(attr_p->type),
+ buf);
+ }
+ return (SDP_SUCCESS);
+ }
+}
+
+/* Stores the whole attribute line in the fixed-size string_val field. */
+sdp_result_e sdp_parse_attr_complete_line (sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr)
+{
+ return sdp_parse_attr_line(sdp_p, attr_p, ptr,
+ attr_p->attr.string_val,
+ sizeof(attr_p->attr.string_val));
+}
+
+/* Variant of sdp_parse_attr_complete_line for lines that may exceed
+ * SDP_MAX_STRING_LEN: parses into a large stack buffer, then stores a
+ * heap copy in attr_p->attr.stringp (owner presumably the attribute
+ * teardown code -- verify against sdp_free_attr). */
+sdp_result_e sdp_parse_attr_long_line (sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr)
+{
+ sdp_result_e result;
+ char buffer[SDP_MAX_LONG_STRING_LEN];
+
+ result = sdp_parse_attr_line(sdp_p, attr_p, ptr,
+ buffer, sizeof(buffer));
+ if (result == SDP_SUCCESS) {
+ attr_p->attr.stringp = cpr_strdup(buffer);
+ }
+ return result;
+}
+
+/* Builds "a=<name>:<value>" from the heap-stored long string value. */
+sdp_result_e sdp_build_attr_long_line (sdp_t *sdp_p, sdp_attr_t *attr_p,
+ flex_string *fs)
+{
+ flex_string_sprintf(fs, "a=%s:%s\r\n", sdp_attr[attr_p->type].name,
+ attr_p->attr.stringp);
+ return SDP_SUCCESS;
+}
+
+/* Builds "a=rtcp-fb:<pt|*> <type> [<param>] [<extra>]" (RFC 4585/5104).
+ * Returns SDP_FAILURE only on an internal (invalid enum) error. */
+sdp_result_e sdp_build_attr_rtcp_fb(sdp_t *sdp_p,
+ sdp_attr_t *attr_p,
+ flex_string *fs)
+{
+ flex_string_sprintf(fs, "a=%s:", sdp_attr[attr_p->type].name);
+
+ /* Payload Type */
+ if (attr_p->attr.rtcp_fb.payload_num == SDP_ALL_PAYLOADS) {
+ flex_string_sprintf(fs, "* ");
+ } else {
+ flex_string_sprintf(fs, "%d ",attr_p->attr.rtcp_fb.payload_num);
+ }
+
+ /* Feedback Type (UNKNOWN is serialized via "extra" below) */
+ if (attr_p->attr.rtcp_fb.feedback_type < SDP_RTCP_FB_UNKNOWN) {
+ flex_string_sprintf(fs, "%s",
+ sdp_rtcp_fb_type_val[attr_p->attr.rtcp_fb.feedback_type].name);
+ }
+
+ /* Feedback Type Parameters */
+ switch (attr_p->attr.rtcp_fb.feedback_type) {
+ case SDP_RTCP_FB_ACK:
+ if (attr_p->attr.rtcp_fb.param.ack < SDP_MAX_RTCP_FB_ACK) {
+ flex_string_sprintf(fs, " %s",
+ sdp_rtcp_fb_ack_type_val[attr_p->attr.rtcp_fb.param.ack]
+ .name);
+ }
+ break;
+ case SDP_RTCP_FB_CCM: /* RFC 5104 */
+ if (attr_p->attr.rtcp_fb.param.ccm < SDP_MAX_RTCP_FB_CCM) {
+ flex_string_sprintf(fs, " %s",
+ sdp_rtcp_fb_ccm_type_val[attr_p->attr.rtcp_fb.param.ccm]
+ .name);
+ }
+ break;
+ case SDP_RTCP_FB_NACK:
+ /* NACK_BASIC has no parameter token, hence the lower bound check */
+ if (attr_p->attr.rtcp_fb.param.nack > SDP_RTCP_FB_NACK_BASIC
+ && attr_p->attr.rtcp_fb.param.nack < SDP_MAX_RTCP_FB_NACK) {
+ flex_string_sprintf(fs, " %s",
+ sdp_rtcp_fb_nack_type_val[attr_p->attr.rtcp_fb.param.nack]
+ .name);
+ }
+ break;
+ case SDP_RTCP_FB_TRR_INT:
+ flex_string_sprintf(fs, " %u", attr_p->attr.rtcp_fb.param.trr_int);
+ break;
+ case SDP_RTCP_FB_REMB:
+ /* No additional params after REMB */
+ break;
+
+ case SDP_RTCP_FB_UNKNOWN:
+ /* Contents are in the "extra" field */
+ break;
+
+ default:
+ CSFLogError(logTag, "%s Error: Invalid rtcp-fb enum (%d)",
+ sdp_p->debug_str, attr_p->attr.rtcp_fb.feedback_type);
+ return SDP_FAILURE;
+ }
+
+ /* Tack on any information that cannot otherwise be represented by
+ * the sdp_fmtp_fb_t structure. */
+ if (attr_p->attr.rtcp_fb.extra[0]) {
+ flex_string_sprintf(fs, " %s", attr_p->attr.rtcp_fb.extra);
+ }
+
+ /* Line ending */
+ flex_string_sprintf(fs, "\r\n");
+
+ return SDP_SUCCESS;
+}
+
+/* Parses "a=rtcp-fb:<pt|*> <type> [<param>]"; any trailing text is kept
+ * verbatim in the "extra" field. Unknown feedback types are accepted
+ * (stored as SDP_RTCP_FB_UNKNOWN with the text in "extra"). */
+sdp_result_e sdp_parse_attr_rtcp_fb (sdp_t *sdp_p,
+ sdp_attr_t *attr_p,
+ const char *ptr)
+{
+ sdp_result_e result = SDP_SUCCESS;
+ sdp_fmtp_fb_t *rtcp_fb_p = &(attr_p->attr.rtcp_fb);
+ int i;
+
+ /* Set up attribute fields */
+ rtcp_fb_p->payload_num = 0;
+ rtcp_fb_p->feedback_type = SDP_RTCP_FB_UNKNOWN;
+ rtcp_fb_p->extra[0] = '\0';
+
+ /* Skip WS (just in case) */
+ while (*ptr == ' ' || *ptr == '\t') {
+ ptr++;
+ }
+
+ /* Look for the special "*" payload type */
+ if (*ptr == '*') {
+ rtcp_fb_p->payload_num = SDP_ALL_PAYLOADS;
+ ptr++;
+ } else {
+ /* If the pt is not '*', parse it out as an integer */
+ rtcp_fb_p->payload_num = (uint16_t)sdp_getnextnumtok(ptr, &ptr,
+ " \t", &result);
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: could not parse payload type for rtcp-fb attribute",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+
+ return SDP_INVALID_PARAMETER;
+ }
+ }
+
+ /* Read feedback type */
+ i = find_token_enum("rtcp-fb attribute", sdp_p, &ptr, sdp_rtcp_fb_type_val,
+ SDP_MAX_RTCP_FB, SDP_RTCP_FB_UNKNOWN);
+ if (i < 0) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: could not parse feedback type for rtcp-fb attribute",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return SDP_INVALID_PARAMETER;
+ }
+ rtcp_fb_p->feedback_type = (sdp_rtcp_fb_type_e) i;
+
+ /* Per-type parameter, stored in the param union member matching type */
+ switch(rtcp_fb_p->feedback_type) {
+ case SDP_RTCP_FB_ACK:
+ i = find_token_enum("rtcp-fb ack type", sdp_p, &ptr,
+ sdp_rtcp_fb_ack_type_val,
+ SDP_MAX_RTCP_FB_ACK, SDP_RTCP_FB_ACK_UNKNOWN);
+ if (i < 0) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: could not parse ack type for rtcp-fb attribute",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return SDP_INVALID_PARAMETER;
+ }
+ rtcp_fb_p->param.ack = (sdp_rtcp_fb_ack_type_e) i;
+ break;
+
+ case SDP_RTCP_FB_CCM:
+ i = find_token_enum("rtcp-fb ccm type", sdp_p, &ptr,
+ sdp_rtcp_fb_ccm_type_val,
+ SDP_MAX_RTCP_FB_CCM, SDP_RTCP_FB_CCM_UNKNOWN);
+ if (i < 0) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: could not parse ccm type for rtcp-fb attribute",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return SDP_INVALID_PARAMETER;
+ }
+ rtcp_fb_p->param.ccm = (sdp_rtcp_fb_ccm_type_e) i;
+
+ /* TODO -- We don't currently parse tmmbr parameters or vbcm
+ submessage types. If we decide to support these modes of
+ operation, we probably want to add parsing code for them.
+ For the time being, they'll just end up parsed into "extra"
+ Bug 1097169.
+ */
+ break;
+
+ case SDP_RTCP_FB_NACK:
+ /* Skip any remaining WS -- see
+ http://code.google.com/p/webrtc/issues/detail?id=1922 */
+ while (*ptr == ' ' || *ptr == '\t') {
+ ptr++;
+ }
+ /* Check for empty string */
+ if (*ptr == '\r') {
+ rtcp_fb_p->param.nack = SDP_RTCP_FB_NACK_BASIC;
+ break;
+ }
+ i = find_token_enum("rtcp-fb nack type", sdp_p, &ptr,
+ sdp_rtcp_fb_nack_type_val,
+ SDP_MAX_RTCP_FB_NACK, SDP_RTCP_FB_NACK_UNKNOWN);
+ if (i < 0) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: could not parse nack type for rtcp-fb attribute",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return SDP_INVALID_PARAMETER;
+ }
+ rtcp_fb_p->param.nack = (sdp_rtcp_fb_nack_type_e) i;
+ break;
+
+ case SDP_RTCP_FB_TRR_INT:
+ rtcp_fb_p->param.trr_int = sdp_getnextnumtok(ptr, &ptr,
+ " \t", &result);
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: could not parse trr-int value for rtcp-fb "
+ "attribute", sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return SDP_INVALID_PARAMETER;
+ }
+ break;
+
+ case SDP_RTCP_FB_REMB:
+ /* No additional tokens to parse after goog-remb */
+ break;
+
+ case SDP_RTCP_FB_UNKNOWN:
+ /* Handled by "extra", below */
+ break;
+
+ default:
+ /* This is an internal error, not a parsing error */
+ CSFLogError(logTag, "%s Error: Invalid rtcp-fb enum (%d)",
+ sdp_p->debug_str, attr_p->attr.rtcp_fb.feedback_type);
+ return SDP_FAILURE;
+ }
+
+ /* Skip any remaining WS */
+ while (*ptr == ' ' || *ptr == '\t') {
+ ptr++;
+ }
+
+ /* Just store the rest of the line in "extra" -- this will return
+ a failure result if there is no more text, but that's fine. */
+ ptr = sdp_getnextstrtok(ptr, rtcp_fb_p->extra,
+ sizeof(rtcp_fb_p->extra), "\r\n", &result);
+
+ return SDP_SUCCESS;
+}
+
+/* Builds "a=setup:<role>" (RFC 4145); fails on an out-of-range enum. */
+sdp_result_e sdp_build_attr_setup(sdp_t *sdp_p,
+ sdp_attr_t *attr_p,
+ flex_string *fs)
+{
+ switch (attr_p->attr.setup) {
+ case SDP_SETUP_ACTIVE:
+ case SDP_SETUP_PASSIVE:
+ case SDP_SETUP_ACTPASS:
+ case SDP_SETUP_HOLDCONN:
+ flex_string_sprintf(fs, "a=%s:%s\r\n",
+ sdp_attr[attr_p->type].name,
+ sdp_setup_type_val[attr_p->attr.setup].name);
+ break;
+ default:
+ CSFLogError(logTag, "%s Error: Invalid setup enum (%d)",
+ sdp_p->debug_str, attr_p->attr.setup);
+ return SDP_FAILURE;
+ }
+
+ return SDP_SUCCESS;
+}
+
+/* Parses "a=setup:<role>"; only the four RFC 4145 roles are accepted. */
+sdp_result_e sdp_parse_attr_setup(sdp_t *sdp_p,
+ sdp_attr_t *attr_p,
+ const char *ptr)
+{
+ int i = find_token_enum("setup attribute", sdp_p, &ptr,
+ sdp_setup_type_val,
+ SDP_MAX_SETUP, SDP_SETUP_UNKNOWN);
+
+ if (i < 0) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: could not parse setup attribute",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return SDP_INVALID_PARAMETER;
+ }
+
+ attr_p->attr.setup = (sdp_setup_type_e) i;
+
+ switch (attr_p->attr.setup) {
+ case SDP_SETUP_ACTIVE:
+ case SDP_SETUP_PASSIVE:
+ case SDP_SETUP_ACTPASS:
+ case SDP_SETUP_HOLDCONN:
+ /* All these values are OK */
+ break;
+ case SDP_SETUP_UNKNOWN:
+ sdp_parse_error(sdp_p,
+ "%s Warning: Unknown setup attribute",
+ sdp_p->debug_str);
+ return SDP_INVALID_PARAMETER;
+ default:
+ /* This is an internal error, not a parsing error */
+ CSFLogError(logTag, "%s Error: Invalid setup enum (%d)",
+ sdp_p->debug_str, attr_p->attr.setup);
+ return SDP_FAILURE;
+ }
+
+ return SDP_SUCCESS;
+}
+
+/* Builds "a=connection:<new|existing>" (RFC 4145); fails on a bad enum. */
+sdp_result_e sdp_build_attr_connection(sdp_t *sdp_p,
+ sdp_attr_t *attr_p,
+ flex_string *fs)
+{
+ switch (attr_p->attr.connection) {
+ case SDP_CONNECTION_NEW:
+ case SDP_CONNECTION_EXISTING:
+ flex_string_sprintf(fs, "a=%s:%s\r\n",
+ sdp_attr[attr_p->type].name,
+ sdp_connection_type_val[attr_p->attr.connection].name);
+ break;
+ default:
+ CSFLogError(logTag, "%s Error: Invalid connection enum (%d)",
+ sdp_p->debug_str, attr_p->attr.connection);
+ return SDP_FAILURE;
+ }
+
+ return SDP_SUCCESS;
+}
+
+/* Parses "a=connection:<new|existing>" (RFC 4145). */
+sdp_result_e sdp_parse_attr_connection(sdp_t *sdp_p,
+ sdp_attr_t *attr_p,
+ const char *ptr)
+{
+ int i = find_token_enum("connection attribute", sdp_p, &ptr,
+ sdp_connection_type_val,
+ SDP_MAX_CONNECTION, SDP_CONNECTION_UNKNOWN);
+
+ if (i < 0) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: could not parse connection attribute",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return SDP_INVALID_PARAMETER;
+ }
+
+ attr_p->attr.connection = (sdp_connection_type_e) i;
+
+ switch (attr_p->attr.connection) {
+ case SDP_CONNECTION_NEW:
+ case SDP_CONNECTION_EXISTING:
+ /* All these values are OK */
+ break;
+ case SDP_CONNECTION_UNKNOWN:
+ sdp_parse_error(sdp_p,
+ "%s Warning: Unknown connection attribute",
+ sdp_p->debug_str);
+ return SDP_INVALID_PARAMETER;
+ default:
+ /* This is an internal error, not a parsing error */
+ CSFLogError(logTag, "%s Error: Invalid connection enum (%d)",
+ sdp_p->debug_str, attr_p->attr.connection);
+ return SDP_FAILURE;
+ }
+ return SDP_SUCCESS;
+}
+
+/* Builds "a=extmap:<id> <uri>" (RFC 5285).
+ * NOTE(review): the parsed direction ("/sendonly" etc.) and any
+ * extension attributes are NOT re-serialized here even when present
+ * in attr_p -- confirm this round-trip loss is intentional. */
+sdp_result_e sdp_build_attr_extmap(sdp_t *sdp_p,
+ sdp_attr_t *attr_p,
+ flex_string *fs)
+{
+ flex_string_sprintf(fs, "a=extmap:%d %s\r\n",
+ attr_p->attr.extmap.id,
+ attr_p->attr.extmap.uri);
+
+ return SDP_SUCCESS;
+}
+
+/* Parses "a=extmap:<id>[/<direction>] <uri> [ext-attributes]"
+ * (RFC 5285). Defaults direction to sendrecv when not specified. */
+sdp_result_e sdp_parse_attr_extmap(sdp_t *sdp_p,
+ sdp_attr_t *attr_p,
+ const char *ptr)
+{
+ sdp_result_e result;
+
+ attr_p->attr.extmap.id = 0;
+ attr_p->attr.extmap.media_direction = SDP_DIRECTION_SENDRECV;
+ attr_p->attr.extmap.media_direction_specified = FALSE;
+ attr_p->attr.extmap.uri[0] = '\0';
+ attr_p->attr.extmap.extension_attributes[0] = '\0';
+
+ /* Find the payload type number. */
+ attr_p->attr.extmap.id =
+ (uint16_t)sdp_getnextnumtok(ptr, &ptr, "/ \t", &result);
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: Invalid extmap id specified for %s attribute.",
+ sdp_p->debug_str, sdp_get_attr_name(attr_p->type));
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ /* Optional "/<direction>" suffix on the id */
+ if (*ptr == '/') {
+ char direction[SDP_MAX_STRING_LEN+1];
+ ++ptr; /* Skip over '/' */
+ ptr = sdp_getnextstrtok(ptr, direction,
+ sizeof(direction), " \t", &result);
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: Invalid direction specified in %s attribute.",
+ sdp_p->debug_str, sdp_get_attr_name(attr_p->type));
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ if (!cpr_strcasecmp(direction, "sendrecv")) {
+ attr_p->attr.extmap.media_direction = SDP_DIRECTION_SENDRECV;
+ } else if (!cpr_strcasecmp(direction, "sendonly")) {
+ attr_p->attr.extmap.media_direction = SDP_DIRECTION_SENDONLY;
+ } else if (!cpr_strcasecmp(direction, "recvonly")) {
+ attr_p->attr.extmap.media_direction = SDP_DIRECTION_RECVONLY;
+ } else if (!cpr_strcasecmp(direction, "inactive")) {
+ attr_p->attr.extmap.media_direction = SDP_DIRECTION_INACTIVE;
+ } else {
+ sdp_parse_error(sdp_p,
+ "%s Warning: Invalid direction specified in %s attribute.",
+ sdp_p->debug_str, sdp_get_attr_name(attr_p->type));
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+ attr_p->attr.extmap.media_direction_specified = TRUE;
+ }
+
+ ptr = sdp_getnextstrtok(ptr, attr_p->attr.extmap.uri,
+ sizeof(attr_p->attr.extmap.uri), " \t", &result);
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: No uri specified in %s attribute.",
+ sdp_p->debug_str, sdp_get_attr_name(attr_p->type));
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ while (*ptr == ' ' || *ptr == '\t') {
+ ++ptr;
+ }
+
+ /* Grab everything that follows, even if it contains whitespace */
+ ptr = sdp_getnextstrtok(ptr, attr_p->attr.extmap.extension_attributes,
+ sizeof(attr_p->attr.extmap.extension_attributes), "\r\n", &result);
+
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Parsed a=%s, id %u, direction %s, "
+ "uri %s, extension %s", sdp_p->debug_str,
+ sdp_get_attr_name(attr_p->type),
+ attr_p->attr.extmap.id,
+ SDP_DIRECTION_PRINT(attr_p->attr.extmap.media_direction),
+ attr_p->attr.extmap.uri,
+ attr_p->attr.extmap.extension_attributes);
+ }
+
+ return (SDP_SUCCESS);
+}
+
+/* Parses "a=msid:<identifier> [appdata]"; appdata is optional and
+ * cleared to the empty string when absent. */
+sdp_result_e sdp_parse_attr_msid(sdp_t *sdp_p,
+ sdp_attr_t *attr_p,
+ const char *ptr)
+{
+ sdp_result_e result;
+
+ ptr = sdp_getnextstrtok(ptr, attr_p->attr.msid.identifier,
+ sizeof(attr_p->attr.msid.identifier), " \t", &result);
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p, "%s Warning: Bad msid identity value",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return SDP_INVALID_PARAMETER;
+ }
+
+ /* SDP_EMPTY_TOKEN is acceptable here: appdata may be omitted */
+ ptr = sdp_getnextstrtok(ptr, attr_p->attr.msid.appdata,
+ sizeof(attr_p->attr.msid.appdata), " \t", &result);
+ if ((result != SDP_SUCCESS) && (result != SDP_EMPTY_TOKEN)) {
+ sdp_parse_error(sdp_p, "%s Warning: Bad msid appdata value",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return SDP_INVALID_PARAMETER;
+ }
+ if (result == SDP_EMPTY_TOKEN) {
+ attr_p->attr.msid.appdata[0] = '\0';
+ }
+
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Parsed a=msid, %s %s", sdp_p->debug_str,
+ attr_p->attr.msid.identifier, attr_p->attr.msid.appdata);
+ }
+
+ return SDP_SUCCESS;
+}
+
+
+/* Builds "a=msid:<identifier>[ <appdata>]"; the separating space is
+ * emitted only when appdata is non-empty. */
+sdp_result_e sdp_build_attr_msid(sdp_t *sdp_p,
+ sdp_attr_t *attr_p,
+ flex_string *fs)
+{
+ flex_string_sprintf(fs, "a=msid:%s%s%s\r\n",
+ attr_p->attr.msid.identifier,
+ attr_p->attr.msid.appdata[0] ? " " : "",
+ attr_p->attr.msid.appdata);
+ return SDP_SUCCESS;
+}
+
+/* Parses "a=msid-semantic:<semantic> [msid ...]". Up to
+ * SDP_MAX_MEDIA_STREAMS msids are stored as cpr_strdup'd strings in
+ * msids[] (extras beyond the limit are silently ignored). */
+sdp_result_e sdp_parse_attr_msid_semantic(sdp_t *sdp_p,
+ sdp_attr_t *attr_p,
+ const char *ptr)
+{
+ sdp_result_e result;
+ int i;
+
+ ptr = sdp_getnextstrtok(ptr,
+ attr_p->attr.msid_semantic.semantic,
+ sizeof(attr_p->attr.msid_semantic.semantic),
+ " \t",
+ &result);
+
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p, "%s Warning: Bad msid-semantic attribute; "
+ "missing semantic",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return SDP_INVALID_PARAMETER;
+ }
+
+ for (i = 0; i < SDP_MAX_MEDIA_STREAMS; ++i) {
+ /* msid-id can be up to 64 characters long, plus null terminator */
+ char temp[65];
+ ptr = sdp_getnextstrtok(ptr, temp, sizeof(temp), " \t", &result);
+
+ if (result != SDP_SUCCESS) {
+ break;
+ }
+
+ attr_p->attr.msid_semantic.msids[i] = cpr_strdup(temp);
+ }
+
+ /* running out of tokens (or a trailing empty one) is the normal
+ * loop exit; anything else is a parse failure */
+ if ((result != SDP_SUCCESS) && (result != SDP_EMPTY_TOKEN)) {
+ sdp_parse_error(sdp_p, "%s Warning: Bad msid-semantic attribute",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return SDP_INVALID_PARAMETER;
+ }
+
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Parsed a=msid-semantic, %s", sdp_p->debug_str,
+ attr_p->attr.msid_semantic.semantic);
+ for (i = 0; i < SDP_MAX_MEDIA_STREAMS; ++i) {
+ if (!attr_p->attr.msid_semantic.msids[i]) {
+ break;
+ }
+
+ SDP_PRINT("%s ... msid %s", sdp_p->debug_str,
+ attr_p->attr.msid_semantic.msids[i]);
+ }
+ }
+
+ return SDP_SUCCESS;
+}
+
+
+/* Builds "a=msid-semantic:<semantic>[ <msid> ...]"; stops at the first
+ * NULL slot in msids[]. */
+sdp_result_e sdp_build_attr_msid_semantic(sdp_t *sdp_p,
+ sdp_attr_t *attr_p,
+ flex_string *fs)
+{
+ int i;
+ flex_string_sprintf(fs, "a=msid-semantic:%s",
+ attr_p->attr.msid_semantic.semantic);
+ for (i = 0; i < SDP_MAX_MEDIA_STREAMS; ++i) {
+ if (!attr_p->attr.msid_semantic.msids[i]) {
+ break;
+ }
+
+ flex_string_sprintf(fs, " %s",
+ attr_p->attr.msid_semantic.msids[i]);
+ }
+ flex_string_sprintf(fs, "\r\n");
+ return SDP_SUCCESS;
+}
+
+/* Parses "a=ssrc:<ssrc> [attribute...]" (RFC 5576). The ssrc is stored
+ * as a uint32_t; everything after it is kept verbatim in "attribute". */
+sdp_result_e sdp_parse_attr_ssrc(sdp_t *sdp_p,
+ sdp_attr_t *attr_p,
+ const char *ptr)
+{
+ sdp_result_e result;
+
+ attr_p->attr.ssrc.ssrc =
+ (uint32_t)sdp_getnextnumtok(ptr, &ptr, " \t", &result);
+
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p, "%s Warning: Bad ssrc attribute, cannot parse ssrc",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return SDP_INVALID_PARAMETER;
+ }
+
+ /* Skip any remaining WS */
+ while (*ptr == ' ' || *ptr == '\t') {
+ ptr++;
+ }
+
+ /* Just store the rest of the line in "attribute" -- this will return
+ a failure result if there is no more text, but that's fine. */
+ ptr = sdp_getnextstrtok(ptr,
+ attr_p->attr.ssrc.attribute,
+ sizeof(attr_p->attr.ssrc.attribute),
+ "\r\n",
+ &result);
+
+ return SDP_SUCCESS;
+}
+
+
+/* Builds "a=ssrc:<ssrc>[ <attribute>]" (RFC 5576).
+ *
+ * The ssrc field is a uint32_t (see sdp_parse_attr_ssrc), so it must be
+ * formatted with %u; the previous "%s" passed an integer where a char*
+ * was expected -- undefined behavior and a likely crash on build. */
+sdp_result_e sdp_build_attr_ssrc(sdp_t *sdp_p,
+ sdp_attr_t *attr_p,
+ flex_string *fs)
+{
+ flex_string_sprintf(fs, "a=ssrc:%u%s%s\r\n",
+ attr_p->attr.ssrc.ssrc,
+ attr_p->attr.ssrc.attribute[0] ? " " : "",
+ attr_p->attr.ssrc.attribute);
+ return SDP_SUCCESS;
+}
diff --git a/media/webrtc/signaling/src/sdp/sipcc/sdp_attr_access.c b/media/webrtc/signaling/src/sdp/sipcc/sdp_attr_access.c
new file mode 100644
index 000000000..0562e537e
--- /dev/null
+++ b/media/webrtc/signaling/src/sdp/sipcc/sdp_attr_access.c
@@ -0,0 +1,6372 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "sdp_os_defs.h"
+#include "sdp.h"
+#include "sdp_private.h"
+
+#include "CSFLog.h"
+
+static const char* logTag = "sdp_attr_access";
+
+/* Attribute access routines are all defined by the following parameters.
+ *
+ * sdp_p The SDP handle returned by sdp_init_description.
+ * level The level the attribute is defined. Can be either
+ * SDP_SESSION_LEVEL or 0-n specifying a media line level.
+ * inst_num The instance number of the attribute. Multiple instances
+ * of a particular attribute may exist at each level and so
+ * the inst_num determines the particular attribute at that
+ * level that should be accessed. Note that this is the
+ * instance number of the specified type of attribute, not the
+ * overall attribute number at the level. Also note that the
+ * instance number is 1-based. For example:
+ * v=0
+ * o=mhandley 2890844526 2890842807 IN IP4 126.16.64.4
+ * s=SDP Seminar
+ * c=IN IP4 10.1.0.2
+ * t=0 0
+ * m=audio 1234 RTP/AVP 0 101 102
+ * a=foo 1
+ * a=foo 2
+ * a=bar 1 # This is instance 1 of attribute bar.
+ * a=foo 3 # This is instance 3 of attribute foo.
+ * cap_num Almost all of the attributes may be defined as X-cpar
+ * parameters (with the exception of X-sqn, X-cap, and X-cpar).
+ * If the cap_num is set to zero, then the attribute is not
+ * an X-cpar parameter attribute. If the cap_num is any other
+ * value, it specifies the capability number that the X-cpar
+ * attribute is specified for.
+ */
+
+/* Attribute handling:
+ *
+ * There are two basic types of attributes handled by the SDP library,
+ * those defined by a= token lines, and those embedded with a=X-cpar lines.
+ * The handling for each of these is described here.
+ *
+ * Simple (non X-cpar attributes):
+ *
+ * Attributes not embedded in a=X-cpar lines are referenced by level and
+ * instance number. For these attributes the capability number is always
+ * set to zero.
+ *
+ * An application will typically process these attributes in one of two ways.
+ * With the first method, the application can determine the total number
+ * of attributes defined at a given level and process them one at a time.
+ * For each attribute, the application will query the library to find out
+ * what type of attribute it is and which instance within that type. The
+ * application can then process this particular attribute referencing it
+ * by level and instance number.
+ *
+ * A second method of processing attributes is for applications to determine
+ * each type of attribute they are interested in, query the SDP library to
+ * find out how many of that type of attribute exist at a given level, and
+ * process each one at a time.
+ *
+ * X-cpar attribute processing:
+ *
+ * X-cpar attributes can contain embedded attributes. They are associated
+ * with X-cap attribute lines. An example of X-cap and X-cpar attributes
+ * found in an SDP is as follows:
+ *
+ * v=0
+ * o=- 25678 753849 IN IP4 128.96.41.1
+ * s=-
+ * t=0 0
+ * c=IN IP4 10.1.0.2
+ * m=audio 3456 RTP/AVP 18 96
+ * a=rtpmap:96 telephone-event/8000
+ * a=fmtp:96 0-15,32-35
+ * a=X-sqn: 0
+ * a=X-cap: 1 audio RTP/AVP 0 18 96 97
+ * a=X-cpar: a=fmtp:96 0-16,32-35
+ * a=X-cpar: a=rtpmap:97 X-NSE/8000
+ * a=X-cpar: a=fmtp:97 195-197
+ * a=X-cap: 5 image udptl t38
+ * a=X-cap: 6 application udp X-tmr
+ * a=X-cap: 7 audio RTP/AVP 100 101
+ * a=X-cpar: a=rtpmap:100 g.711/8000
+ * a=X-cpar: a=rtpmap:101 g.729/8000
+ *
+ * X-cap attributes can be defined at the SESSION_LEVEL or any media level.
+ * An X-cap attr is defined by the level and instance number just like
+ * other attributes. In the example above, X-cap attrs are defined at
+ * media level 1 and there are four instances at that level.
+ *
+ * The X-cpar attributes can also be referenced by level and instance number.
+ * However, the embedded attribute within an X-cpar attribute must be
+ * referenced by level, instance number, and capability number. This is
+ * because the X-cpar attribute is associated with a particular X-cap/
+ * capability.
+ * For all attributes that are not embedded within an X-cpar attribute, the
+ * cap_num should be referenced as zero. But for X-cpar attributes, the
+ * cap_num is specified to be one of the capability numbers of the previous
+ * X-cap line. The number of capabilities specified in an X-cap line is
+ * equal to the number of payloads. Thus, in this example, the first X-cap
+ * attr instance specifies capabilities 1-4, the second specifies capability
+ * 5, the third capability 6, and the fourth capabilities 7-8.
+ *
+ * X-cpar attributes can be processed with methods similar to the two
+ * previously mentioned. For each X-cap attribute, the application can
+ * use one of two methods to process the X-cpar attributes. First, it
+ * can query the total number of X-cpar attributes associated with a
+ * given X-cap attribute. The X-cap attribute is here defined by a level
+ * and a capability number. In the example above, the total number of
+ * attributes defined is as follows:
+ * level 1, cap_num 1 - total attrs: 3
+ * level 1, cap_num 5 - total attrs: 0
+ * level 1, cap_num 6 - total attrs: 0
+ * level 1, cap_num 7 - total attrs: 2
+ *
+ * Note that if the application queried the number of attributes for
+ * cap_num 2, 3, or 4, it would also return 3 attrs, and for cap_num
+ * 8 the library would return 2.
+ *
+ * Once the application determines the total number of attributes for
+ * that capability, it can again query the embedded attribute type and
+ * instance. For example, sdp_get_attr_type would return the following:
+ * level 1, cap_num 1, attr 1 -> attr type fmtp, instance 1
+ * level 1, cap_num 1, attr 2 -> attr type rtpmap, instance 1
+ * level 1, cap_num 1, attr 3 -> attr type fmtp, instance 2
+ * level 1, cap_num 7, attr 1 -> attr type rtpmap, instance 1
+ * level 1, cap_num 7, attr 2 -> attr type rtpmap, instance 2
+ *
+ * The individual embedded attributes can then be accessed by level,
+ * cap_num, and instance number.
+ *
+ * With the second method for handling X-cpar attributes, the application
+ * determines the types of attributes it is interested in. It can then
+ * query the SDP library to determine the number of attributes of that
+ * type found for that level and cap_num, and then process each one at
+ * a time. e.g., calling sdp_attr_num_instances would give:
+ * level 1, cap_num 1, attr_type fmtp -> two instances
+ * level 1, cap_num 1, attr_type rtpmap -> one instance
+ * level 1, cap_num 7, attr_type fmtp -> zero instances
+ * level 1, cap_num 7, attr_type rtpmap -> two instances
+ */
+
+
+/* Function:    sdp_add_new_attr
+ * Description: Add a new attribute of the specified type at the given
+ *              level and capability level or base attribute if cap_num
+ *              is zero.
+ * Parameters:  sdp_p     The SDP handle returned by sdp_init_description.
+ *              level     The level to check for the attribute.
+ *              cap_num   The capability number associated with the
+ *                        attribute if any.  If none, should be zero.
+ *              attr_type The type of attribute to add.
+ *              inst_num  Pointer to a uint16_t in which to return the
+ *                        (1-based) instance number of the newly added
+ *                        attribute.
+ * Returns:     SDP_SUCCESS             Attribute was added successfully.
+ *              SDP_NO_RESOURCE         No memory avail for new attribute.
+ *              SDP_INVALID_PARAMETER   Specified media line is not defined,
+ *                                      or attr_type cannot be a capability
+ *                                      parameter.
+ *              SDP_INVALID_MEDIA_LEVEL Attribute type (rtcp, label) is only
+ *                                      valid at a media level.
+ */
+
+/* Append new_attr_p at the tail of the attribute list rooted at *head_p,
+ * counting the pre-existing attributes of type attr_type into *inst_num
+ * along the way.  Factored out of sdp_add_new_attr, which previously
+ * repeated this walk-and-append logic verbatim for the session, media,
+ * and capability attribute lists. */
+static void sdp_append_attr_to_list (sdp_attr_t **head_p,
+                                     sdp_attr_t *new_attr_p,
+                                     sdp_attr_e attr_type,
+                                     uint16_t *inst_num)
+{
+    sdp_attr_t  *attr_p;
+    sdp_attr_t  *prev_attr_p = NULL;
+
+    if (*head_p == NULL) {
+        *head_p = new_attr_p;
+        return;
+    }
+    for (attr_p = *head_p;
+         attr_p != NULL;
+         prev_attr_p = attr_p, attr_p = attr_p->next_p) {
+        /* Count the num instances of this type. */
+        if (attr_p->type == attr_type) {
+            (*inst_num)++;
+        }
+    }
+    prev_attr_p->next_p = new_attr_p;
+}
+
+sdp_result_e sdp_add_new_attr (sdp_t *sdp_p, uint16_t level, uint8_t cap_num,
+                               sdp_attr_e attr_type, uint16_t *inst_num)
+{
+    uint16_t     i;
+    sdp_mca_t   *mca_p;
+    sdp_attr_t  *attr_p;
+    sdp_attr_t  *new_attr_p;
+    sdp_fmtp_t  *fmtp_p;
+    sdp_comediadir_t *comediadir_p;
+
+    *inst_num = 0;
+
+    /* The capability bookkeeping attributes themselves may not be added
+     * as X-cpar/cpar parameters. */
+    if ((cap_num != 0) &&
+        ((attr_type == SDP_ATTR_X_CAP) || (attr_type == SDP_ATTR_X_CPAR) ||
+         (attr_type == SDP_ATTR_X_SQN) || (attr_type == SDP_ATTR_CDSC) ||
+         (attr_type == SDP_ATTR_CPAR) || (attr_type == SDP_ATTR_SQN))) {
+        if (sdp_p->debug_flag[SDP_DEBUG_WARNINGS]) {
+            CSFLogDebug(logTag, "%s Warning: Invalid attribute type for X-cpar/cdsc "
+                      "parameter.", sdp_p->debug_str);
+        }
+        sdp_p->conf_p->num_invalid_param++;
+        return (SDP_INVALID_PARAMETER);
+    }
+
+    /* Some attributes are valid only under media level */
+    if (level == SDP_SESSION_LEVEL) {
+        switch (attr_type) {
+        case SDP_ATTR_RTCP:
+        case SDP_ATTR_LABEL:
+            return (SDP_INVALID_MEDIA_LEVEL);
+
+        default:
+            break;
+        }
+    }
+
+    new_attr_p = (sdp_attr_t *)SDP_MALLOC(sizeof(sdp_attr_t));
+    if (new_attr_p == NULL) {
+        sdp_p->conf_p->num_no_resource++;
+        return (SDP_NO_RESOURCE);
+    }
+
+    new_attr_p->type = attr_type;
+    new_attr_p->next_p = NULL;
+
+    /* Initialize the new attribute structure */
+    if ((new_attr_p->type == SDP_ATTR_X_CAP) ||
+        (new_attr_p->type == SDP_ATTR_CDSC)) {
+        new_attr_p->attr.cap_p = (sdp_mca_t *)SDP_MALLOC(sizeof(sdp_mca_t));
+        if (new_attr_p->attr.cap_p == NULL) {
+            sdp_free_attr(new_attr_p);
+            sdp_p->conf_p->num_no_resource++;
+            return (SDP_NO_RESOURCE);
+        }
+    } else if (new_attr_p->type == SDP_ATTR_FMTP) {
+        fmtp_p = &(new_attr_p->attr.fmtp);
+        fmtp_p->fmtp_format = SDP_FMTP_UNKNOWN_TYPE;
+        /* set to invalid value */
+        fmtp_p->packetization_mode = SDP_INVALID_PACKETIZATION_MODE_VALUE;
+        fmtp_p->level_asymmetry_allowed = SDP_INVALID_LEVEL_ASYMMETRY_ALLOWED_VALUE;
+        fmtp_p->annexb_required = FALSE;
+        fmtp_p->annexa_required = FALSE;
+        fmtp_p->maxval = 0;
+        fmtp_p->bitrate = 0;
+        fmtp_p->cif = 0;
+        fmtp_p->qcif = 0;
+        fmtp_p->profile = SDP_INVALID_VALUE;
+        fmtp_p->level = SDP_INVALID_VALUE;
+        fmtp_p->parameter_add = SDP_FMTP_UNUSED;
+        fmtp_p->usedtx = SDP_FMTP_UNUSED;
+        fmtp_p->stereo = SDP_FMTP_UNUSED;
+        fmtp_p->useinbandfec = SDP_FMTP_UNUSED;
+        fmtp_p->cbr = SDP_FMTP_UNUSED;
+        for (i=0; i < SDP_NE_NUM_BMAP_WORDS; i++) {
+            fmtp_p->bmap[i] = 0;
+        }
+    } else if ((new_attr_p->type == SDP_ATTR_RTPMAP) ||
+               (new_attr_p->type == SDP_ATTR_SPRTMAP)) {
+        new_attr_p->attr.transport_map.num_chan = 1;
+    } else if (new_attr_p->type == SDP_ATTR_DIRECTION) {
+        comediadir_p = &(new_attr_p->attr.comediadir);
+        comediadir_p->role = SDP_MEDIADIR_ROLE_PASSIVE;
+        comediadir_p->conn_info_present = FALSE;
+    } else if (new_attr_p->type == SDP_ATTR_MPTIME) {
+        sdp_mptime_t *mptime = &(new_attr_p->attr.mptime);
+        mptime->num_intervals = 0;
+    }
+
+    if (cap_num == 0) {
+        /* Add a new attribute at session or media level. */
+        if (level == SDP_SESSION_LEVEL) {
+            sdp_append_attr_to_list(&sdp_p->sess_attrs_p, new_attr_p,
+                                    attr_type, inst_num);
+        } else {
+            mca_p = sdp_find_media_level(sdp_p, level);
+            if (mca_p == NULL) {
+                sdp_free_attr(new_attr_p);
+                sdp_p->conf_p->num_invalid_param++;
+                return (SDP_INVALID_PARAMETER);
+            }
+            sdp_append_attr_to_list(&mca_p->media_attrs_p, new_attr_p,
+                                    attr_type, inst_num);
+        }
+    } else {
+        /* Add a new capability attribute - find the capability attr. */
+        attr_p = sdp_find_capability(sdp_p, level, cap_num);
+        if (attr_p == NULL) {
+            sdp_free_attr(new_attr_p);
+            sdp_p->conf_p->num_invalid_param++;
+            return (SDP_INVALID_PARAMETER);
+        }
+        sdp_append_attr_to_list(&attr_p->attr.cap_p->media_attrs_p, new_attr_p,
+                                attr_type, inst_num);
+    }
+
+    /* Increment the instance num for the attr just added. */
+    (*inst_num)++;
+    return (SDP_SUCCESS);
+}
+
+/* Function:    sdp_attr_num_instances
+ * Description: Count the attributes of the specified type at the given
+ *              level and capability level.
+ * Parameters:  sdp_p     The SDP handle returned by sdp_init_description.
+ *              level     The level to check for the attribute.
+ *              cap_num   The capability number associated with the
+ *                        attribute if any.  If none, should be zero.
+ *              attr_type The type of attribute to count.
+ *              num_attr_inst Pointer to a uint16_t in which to return the
+ *                        number of attributes.
+ * Returns:     SDP_SUCCESS            Count returned in *num_attr_inst.
+ *              SDP_INVALID_PARAMETER  Specified media line is not defined.
+ *              SDP_INVALID_CAPABILITY Specified capability is not defined.
+ */
+sdp_result_e sdp_attr_num_instances (sdp_t *sdp_p, uint16_t level, uint8_t cap_num,
+                                     sdp_attr_e attr_type, uint16_t *num_attr_inst)
+{
+    sdp_attr_t  *cursor;
+    sdp_result_e rc;
+    static char  fname[] = "attr_num_instances";
+
+    *num_attr_inst = 0;
+
+    rc = sdp_find_attr_list(sdp_p, level, cap_num, &cursor, fname);
+    if (rc != SDP_SUCCESS) {
+        return (rc);
+    }
+
+    /* Walk the list, counting every attribute of the requested type. */
+    while (cursor != NULL) {
+        if (cursor->type == attr_type) {
+            (*num_attr_inst)++;
+        }
+        cursor = cursor->next_p;
+    }
+
+    return (rc);
+}
+
+/* Forward declaration for use in sdp_free_attr */
+static boolean sdp_attr_is_long_string(sdp_attr_e attr_type);
+
+
+/* Internal routine to free the memory associated with an attribute.
+ * Certain attributes allocate additional memory.  Free this and then
+ * free the attribute itself.
+ * Note that this routine may be called at any point (i.e., may be
+ * called due to a failure case) and so the additional memory
+ * associated with an attribute may or may not have been already
+ * allocated.  This routine should check this carefully.
+ */
+void sdp_free_attr (sdp_attr_t *attr_p)
+{
+    sdp_mca_t   *cap_p;
+    sdp_attr_t  *cpar_p;
+    sdp_attr_t  *next_cpar_p;
+    int          i;
+
+    /* If this is an X-cap/cdsc attr, free the cap_p structure and
+     * all X-cpar/cpar attributes. */
+    if ((attr_p->type == SDP_ATTR_X_CAP) ||
+        (attr_p->type == SDP_ATTR_CDSC)) {
+        cap_p = attr_p->attr.cap_p;
+        if (cap_p != NULL) {
+            /* Recursively free each embedded attribute, saving next_p
+             * before the node is destroyed. */
+            for (cpar_p = cap_p->media_attrs_p; cpar_p != NULL;) {
+                next_cpar_p = cpar_p->next_p;
+                sdp_free_attr(cpar_p);
+                cpar_p = next_cpar_p;
+            }
+            SDP_FREE(cap_p);
+        }
+    } else if ((attr_p->type == SDP_ATTR_SDESCRIPTIONS) ||
+               (attr_p->type == SDP_ATTR_SRTP_CONTEXT)) {
+        /* NOTE(review): session_parameters (and msids below) may never have
+         * been allocated on a failure path -- this presumes SDP_FREE is
+         * NULL-safe; confirm against its definition in sdp_private.h. */
+        SDP_FREE(attr_p->attr.srtp_context.session_parameters);
+    } else if (sdp_attr_is_long_string(attr_p->type)) {
+        /* Long-string attributes keep their value in heap storage. */
+        SDP_FREE(attr_p->attr.stringp);
+    }
+
+    /* group ids and msid-semantic msids are individually allocated. */
+    if (attr_p->type == SDP_ATTR_GROUP) {
+        for (i = 0; i < attr_p->attr.stream_data.num_group_id; i++) {
+            SDP_FREE(attr_p->attr.stream_data.group_ids[i]);
+        }
+    } else if (attr_p->type == SDP_ATTR_MSID_SEMANTIC) {
+        for (i = 0; i < SDP_MAX_MEDIA_STREAMS; ++i) {
+            SDP_FREE(attr_p->attr.msid_semantic.msids[i]);
+        }
+    }
+
+    /* Now free the actual attribute memory. */
+    SDP_FREE(attr_p);
+
+}
+
+
+/* Function:    sdp_find_attr_list
+ * Description: Find the attribute list for the specified level and cap_num.
+ *              Note: This is not an API for the application but an internal
+ *              routine used by the SDP library.
+ * Parameters:  sdp_p    Pointer to the SDP to search.
+ *              level    The level to check for the attribute list.
+ *              cap_num  The capability number associated with the
+ *                       attribute list.  If none, should be zero.
+ *              attr_p   Pointer to the attr list pointer.  Will be
+ *                       filled in on return if successful.
+ *              fname    String function name calling this routine.
+ *                       Used for printing debug.
+ * Returns:     SDP_SUCCESS
+ *              SDP_INVALID_PARAMETER   (media level not defined)
+ *              SDP_INVALID_CAPABILITY  (cap_num not defined at this level)
+ */
+sdp_result_e sdp_find_attr_list (sdp_t *sdp_p, uint16_t level, uint8_t cap_num,
+                                 sdp_attr_t **attr_p, char *fname)
+{
+    sdp_mca_t   *mca_p;
+    sdp_attr_t  *cap_attr_p;
+
+    /* Initialize the attr pointer. */
+    *attr_p = NULL;
+
+    if (cap_num != 0) {
+        /* Find the attr list for the capability specified. */
+        cap_attr_p = sdp_find_capability(sdp_p, level, cap_num);
+        if (cap_attr_p == NULL) {
+            if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+                CSFLogError(logTag, "%s %s, invalid capability %u at "
+                            "level %u specified.", sdp_p->debug_str, fname,
+                            (unsigned)cap_num, (unsigned)level);
+            }
+            sdp_p->conf_p->num_invalid_param++;
+            return (SDP_INVALID_CAPABILITY);
+        }
+        *attr_p = cap_attr_p->attr.cap_p->media_attrs_p;
+        return (SDP_SUCCESS);
+    }
+
+    /* No capability: the list lives directly at the requested level. */
+    if (level == SDP_SESSION_LEVEL) {
+        *attr_p = sdp_p->sess_attrs_p;
+        return (SDP_SUCCESS);
+    }
+
+    mca_p = sdp_find_media_level(sdp_p, level);
+    if (mca_p == NULL) {
+        sdp_p->conf_p->num_invalid_param++;
+        return (SDP_INVALID_PARAMETER);
+    }
+    *attr_p = mca_p->media_attrs_p;
+    return (SDP_SUCCESS);
+}
+
+/* Find fmtp inst_num with correct payload value or -1 for failure.
+ * The returned instance number is 1-based, counting only fmtp attributes
+ * at the given media level. */
+int sdp_find_fmtp_inst (sdp_t *sdp_p, uint16_t level, uint16_t payload_num)
+{
+  sdp_mca_t  *mca_p;
+  sdp_attr_t *cur;
+  uint16_t    fmtp_seen = 0;
+
+  /* fmtp attributes only exist at a media level. */
+  mca_p = sdp_find_media_level(sdp_p, level);
+  if (!mca_p) {
+    return -1;
+  }
+
+  for (cur = mca_p->media_attrs_p; cur; cur = cur->next_p) {
+    if (cur->type != SDP_ATTR_FMTP) {
+      continue;
+    }
+    ++fmtp_seen;
+    if (cur->attr.fmtp.payload_num == payload_num) {
+      return fmtp_seen;
+    }
+  }
+
+  return -1;
+}
+
+/* Function:    sdp_find_attr
+ * Description: Find the specified attribute in an SDP structure.
+ *              Note: This is not an API for the application but an internal
+ *              routine used by the SDP library.
+ * Parameters:  sdp_p     The SDP handle returned by sdp_init_description.
+ *              level     The level to check for the attribute.
+ *              cap_num   The capability number associated with the
+ *                        attribute if any.  If none, should be zero.
+ *              attr_type The type of attribute to find.
+ *              inst_num  The instance num of the attribute to find
+ *                        (1-based, per attribute type, per level).
+ * Returns:     Pointer to the attribute or NULL if not found.
+ */
+sdp_attr_t *sdp_find_attr (sdp_t *sdp_p, uint16_t level, uint8_t cap_num,
+                           sdp_attr_e attr_type, uint16_t inst_num)
+{
+    sdp_attr_t *list_p;
+    sdp_attr_t *cur_p;
+    sdp_mca_t  *mca_p;
+    uint16_t    seen = 0;
+
+    if (inst_num < 1) {
+        return (NULL);
+    }
+
+    /* First pick the attribute list to scan: the capability's embedded
+     * list, the session list, or the media line's list. */
+    if (cap_num != 0) {
+        /* Attr is a capability X-cpar/cpar attribute. */
+        cur_p = sdp_find_capability(sdp_p, level, cap_num);
+        if (cur_p == NULL) {
+            return (NULL);
+        }
+        list_p = cur_p->attr.cap_p->media_attrs_p;
+    } else if (level == SDP_SESSION_LEVEL) {
+        list_p = sdp_p->sess_attrs_p;
+    } else {
+        mca_p = sdp_find_media_level(sdp_p, level);
+        if (mca_p == NULL) {
+            return (NULL);
+        }
+        list_p = mca_p->media_attrs_p;
+    }
+
+    /* Then walk it, counting attributes of the requested type until the
+     * requested instance is reached. */
+    for (cur_p = list_p; cur_p != NULL; cur_p = cur_p->next_p) {
+        if (cur_p->type == attr_type) {
+            if (++seen == inst_num) {
+                return (cur_p);
+            }
+        }
+    }
+
+    return (NULL);
+}
+
+/* Function:    sdp_find_capability
+ * Description: Find the X-cap/cdsc attribute that covers the specified
+ *              capability number.  Each X-cap/cdsc line accounts for as
+ *              many capability numbers as it has payloads, so the match
+ *              is the first line whose running payload total reaches
+ *              cap_num.  Internal routine, not an application API.
+ * Parameters:  sdp_p    The SDP handle.
+ *              level    The level to check for the capability.
+ *              cap_num  The capability number to locate.
+ * Returns:     Pointer to the capability attribute or NULL if not found.
+ */
+sdp_attr_t *sdp_find_capability (sdp_t *sdp_p, uint16_t level, uint8_t cap_num)
+{
+    sdp_attr_t *attr_p;
+    sdp_attr_t *list_p;
+    sdp_mca_t  *mca_p;
+    uint8_t     caps_so_far = 0;
+
+    if (level == SDP_SESSION_LEVEL) {
+        list_p = sdp_p->sess_attrs_p;
+    } else {
+        mca_p = sdp_find_media_level(sdp_p, level);
+        if (mca_p == NULL) {
+            return (NULL);
+        }
+        list_p = mca_p->media_attrs_p;
+    }
+
+    for (attr_p = list_p; attr_p != NULL; attr_p = attr_p->next_p) {
+        if ((attr_p->type != SDP_ATTR_X_CAP) &&
+            (attr_p->type != SDP_ATTR_CDSC)) {
+            continue;
+        }
+        caps_so_far += attr_p->attr.cap_p->num_payloads;
+        if (cap_num <= caps_so_far) {
+            /* This is the right capability */
+            return (attr_p);
+        }
+    }
+
+    /* We didn't find the specified capability. */
+    if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+        CSFLogError(logTag, "%s Unable to find specified capability (level %u, "
+                    "cap_num %u).", sdp_p->debug_str, (unsigned)level, (unsigned)cap_num);
+    }
+    sdp_p->conf_p->num_invalid_param++;
+    return (NULL);
+}
+
+/* Function:    sdp_attr_valid
+ * Description: Report whether the specified instance of the given
+ *              attribute exists at the given level/capability.
+ * Parameters:  sdp_p     The SDP handle returned by sdp_init_description.
+ *              attr_type The attribute type to validate.
+ *              level     The level to check for the attribute.
+ *              cap_num   The capability number associated with the
+ *                        attribute if any.  If none, should be zero.
+ *              inst_num  The attribute instance number to check.
+ * Returns:     TRUE or FALSE.
+ */
+tinybool sdp_attr_valid (sdp_t *sdp_p, sdp_attr_e attr_type, uint16_t level,
+                         uint8_t cap_num, uint16_t inst_num)
+{
+    /* Defined iff the lookup succeeds. */
+    return (sdp_find_attr(sdp_p, level, cap_num, attr_type, inst_num) != NULL)
+               ? TRUE : FALSE;
+}
+
+/* Function:    sdp_attr_line_number
+ * Description: Returns the line number this attribute appears on.
+ *              Only meaningful if the SDP was parsed rather than created
+ *              locally.
+ * Parameters:  sdp_p     The SDP handle returned by sdp_init_description.
+ *              attr_type The attribute type to look up.
+ *              level     The level to check for the attribute.
+ *              cap_num   The capability number associated with the
+ *                        attribute if any.  If none, should be zero.
+ *              inst_num  The attribute instance number to check.
+ * Returns:     line number, or 0 if the attribute does not exist.
+ */
+uint32_t sdp_attr_line_number (sdp_t *sdp_p, sdp_attr_e attr_type, uint16_t level,
+                               uint8_t cap_num, uint16_t inst_num)
+{
+    const sdp_attr_t *found =
+        sdp_find_attr(sdp_p, level, cap_num, attr_type, inst_num);
+
+    return found ? found->line_number : 0;
+}
+
+/* TRUE for attribute types whose entire value is the single string kept
+ * in attr.string_val (see sdp_attr_get_simple_string). */
+static boolean sdp_attr_is_simple_string(sdp_attr_e attr_type) {
+  switch (attr_type) {
+    case SDP_ATTR_BEARER:
+    case SDP_ATTR_CALLED:
+    case SDP_ATTR_CONN_TYPE:
+    case SDP_ATTR_DIALED:
+    case SDP_ATTR_DIALING:
+    case SDP_ATTR_FRAMING:
+    case SDP_ATTR_MID:
+    case SDP_ATTR_X_SIDIN:
+    case SDP_ATTR_X_SIDOUT:
+    case SDP_ATTR_X_CONFID:
+    case SDP_ATTR_LABEL:
+    case SDP_ATTR_ICE_OPTIONS:
+    case SDP_ATTR_IMAGEATTR:
+    case SDP_ATTR_SIMULCAST:
+    case SDP_ATTR_RID:
+      return TRUE;
+    default:
+      return FALSE;
+  }
+}
+
+/* Function:    sdp_attr_get_simple_string
+ * Description: Returns a pointer to the single string parameter of a
+ *              simple-string attribute (the types accepted by
+ *              sdp_attr_is_simple_string).  The value is returned as a
+ *              const ptr and so cannot be modified by the application.
+ *              NULL is returned if the type does not qualify or the
+ *              instance is not defined.
+ * Parameters:  sdp_p     The SDP handle returned by sdp_init_description.
+ *              attr_type The simple string attribute type.
+ *              level     The level to check for the attribute.
+ *              cap_num   The capability number associated with the
+ *                        attribute if any.  If none, should be zero.
+ *              inst_num  The attribute instance number to check.
+ * Returns:     Pointer to the parameter value, or NULL.
+ */
+const char *sdp_attr_get_simple_string (sdp_t *sdp_p, sdp_attr_e attr_type,
+                                        uint16_t level, uint8_t cap_num, uint16_t inst_num)
+{
+    sdp_attr_t *found;
+
+    if (!sdp_attr_is_simple_string(attr_type)) {
+        if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+            CSFLogError(logTag, "%s Attribute type is not a simple string (%s)",
+                        sdp_p->debug_str, sdp_get_attr_name(attr_type));
+        }
+        sdp_p->conf_p->num_invalid_param++;
+        return (NULL);
+    }
+
+    found = sdp_find_attr(sdp_p, level, cap_num, attr_type, inst_num);
+    if (found == NULL) {
+        if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+            CSFLogError(logTag, "%s Attribute %s, level %u instance %u not found.",
+                        sdp_p->debug_str, sdp_get_attr_name(attr_type),
+                        (unsigned)level, (unsigned)inst_num);
+        }
+        sdp_p->conf_p->num_invalid_param++;
+        return (NULL);
+    }
+
+    return (found->attr.string_val);
+}
+
+/* TRUE for attribute types whose value lives in the separately allocated
+ * attr.stringp buffer (freed in sdp_free_attr) rather than string_val. */
+static boolean sdp_attr_is_long_string(sdp_attr_e attr_type) {
+  switch (attr_type) {
+    case SDP_ATTR_IDENTITY:
+    case SDP_ATTR_DTLS_MESSAGE:
+      return TRUE;
+    default:
+      return FALSE;
+  }
+}
+
+/* Identical in usage to sdp_attr_get_simple_string(), but for the
+ * long-string attribute types (identity, dtls-message) whose value is
+ * kept in heap storage (attr.stringp).  Returns NULL if the type does
+ * not qualify or the instance is not defined. */
+const char *sdp_attr_get_long_string (sdp_t *sdp_p, sdp_attr_e attr_type,
+                                      uint16_t level, uint8_t cap_num, uint16_t inst_num)
+{
+    sdp_attr_t *found;
+
+    if (!sdp_attr_is_long_string(attr_type)) {
+        if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+            CSFLogError(logTag, "%s Attribute type is not a long string (%s)",
+                        sdp_p->debug_str, sdp_get_attr_name(attr_type));
+        }
+        sdp_p->conf_p->num_invalid_param++;
+        return (NULL);
+    }
+
+    found = sdp_find_attr(sdp_p, level, cap_num, attr_type, inst_num);
+    if (found == NULL) {
+        if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+            CSFLogError(logTag, "%s Attribute %s, level %u instance %u not found.",
+                        sdp_p->debug_str, sdp_get_attr_name(attr_type),
+                        (unsigned)level, (unsigned)inst_num);
+        }
+        sdp_p->conf_p->num_invalid_param++;
+        return (NULL);
+    }
+
+    return (found->attr.stringp);
+}
+
+/* TRUE for attribute types whose entire value is the single uint32_t in
+ * attr.u32_val (see sdp_attr_get_simple_u32). */
+static boolean sdp_attr_is_simple_u32(sdp_attr_e attr_type) {
+  switch (attr_type) {
+    case SDP_ATTR_EECID:
+    case SDP_ATTR_PTIME:
+    case SDP_ATTR_MAXPTIME:
+    case SDP_ATTR_T38_VERSION:
+    case SDP_ATTR_T38_MAXBITRATE:
+    case SDP_ATTR_T38_MAXBUFFER:
+    case SDP_ATTR_T38_MAXDGRAM:
+    case SDP_ATTR_X_SQN:
+    case SDP_ATTR_TC1_PAYLOAD_BYTES:
+    case SDP_ATTR_TC1_WINDOW_SIZE:
+    case SDP_ATTR_TC2_PAYLOAD_BYTES:
+    case SDP_ATTR_TC2_WINDOW_SIZE:
+    case SDP_ATTR_FRAMERATE:
+      return TRUE;
+    default:
+      return FALSE;
+  }
+}
+
+/* Function:    sdp_attr_get_simple_u32
+ * Description: Returns an unsigned 32-bit attribute parameter, for the
+ *              attribute types accepted by sdp_attr_is_simple_u32
+ *              (eecid, ptime, maxptime, the T38 numeric parameters,
+ *              X-sqn, TC1/TC2 payload/window sizes, framerate).  If the
+ *              attribute is not defined zero is returned; the caller
+ *              must use sdp_attr_valid to distinguish a stored zero
+ *              from "not present".
+ * Parameters:  sdp_p     The SDP handle returned by sdp_init_description.
+ *              attr_type The simple uint32_t attribute type.
+ *              level     The level to check for the attribute.
+ *              cap_num   The capability number associated with the
+ *                        attribute if any.  If none, should be zero.
+ *              inst_num  The attribute instance number to check.
+ * Returns:     uint32_t parameter value.
+ */
+uint32_t sdp_attr_get_simple_u32 (sdp_t *sdp_p, sdp_attr_e attr_type, uint16_t level,
+                                  uint8_t cap_num, uint16_t inst_num)
+{
+    sdp_attr_t *found;
+
+    if (!sdp_attr_is_simple_u32(attr_type)) {
+        if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+            CSFLogError(logTag, "%s Attribute type is not a simple uint32_t (%s)",
+                        sdp_p->debug_str, sdp_get_attr_name(attr_type));
+        }
+        sdp_p->conf_p->num_invalid_param++;
+        return (0);
+    }
+
+    found = sdp_find_attr(sdp_p, level, cap_num, attr_type, inst_num);
+    if (found == NULL) {
+        if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+            CSFLogError(logTag, "%s Attribute %s, level %u instance %u not found.",
+                        sdp_p->debug_str, sdp_get_attr_name(attr_type),
+                        (unsigned)level, (unsigned)inst_num);
+        }
+        sdp_p->conf_p->num_invalid_param++;
+        return (0);
+    }
+
+    return (found->attr.u32_val);
+}
+
+/* Function:    sdp_attr_get_simple_boolean
+ * Description: Returns a boolean attribute parameter, for the four
+ *              attribute types that carry a single boolean value:
+ *              T38FaxFillBitRemoval, T38FaxTranscodingMMR,
+ *              T38FaxTranscodingJBIG, and TMRGwXid.  If the attribute
+ *              is not defined FALSE is returned; the caller must use
+ *              sdp_attr_valid to distinguish a stored FALSE from
+ *              "not present".
+ * Parameters:  sdp_p     The SDP handle returned by sdp_init_description.
+ *              attr_type The simple boolean attribute type.
+ *              level     The level to check for the attribute.
+ *              cap_num   The capability number associated with the
+ *                        attribute if any.  If none, should be zero.
+ *              inst_num  The attribute instance number to check.
+ * Returns:     Boolean value.
+ */
+tinybool sdp_attr_get_simple_boolean (sdp_t *sdp_p, sdp_attr_e attr_type,
+                                      uint16_t level, uint8_t cap_num, uint16_t inst_num)
+{
+    sdp_attr_t *found;
+
+    switch (attr_type) {
+    case SDP_ATTR_T38_FILLBITREMOVAL:
+    case SDP_ATTR_T38_TRANSCODINGMMR:
+    case SDP_ATTR_T38_TRANSCODINGJBIG:
+    case SDP_ATTR_TMRGWXID:
+        break;
+    default:
+        if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+            CSFLogError(logTag, "%s Attribute type is not a simple boolean (%s)",
+                        sdp_p->debug_str, sdp_get_attr_name(attr_type));
+        }
+        sdp_p->conf_p->num_invalid_param++;
+        return (FALSE);
+    }
+
+    found = sdp_find_attr(sdp_p, level, cap_num, attr_type, inst_num);
+    if (found == NULL) {
+        if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+            CSFLogError(logTag, "%s Attribute %s, level %u instance %u not found.",
+                        sdp_p->debug_str, sdp_get_attr_name(attr_type),
+                        (unsigned)level, (unsigned)inst_num);
+        }
+        sdp_p->conf_p->num_invalid_param++;
+        return (FALSE);
+    }
+
+    return (found->attr.boolean_val);
+}
+
+/*
+ * sdp_attr_get_maxprate
+ *
+ * Returns the packet-rate string stored in the maxprate attribute.
+ *
+ * Parameters: sdp_p     The SDP handle returned by sdp_init_description.
+ *             level     The level to check for the attribute.
+ *             inst_num  The attribute instance number to check.
+ *
+ * Returns a pointer to a constant char array that stores the packet-rate,
+ * OR NULL if the attribute does not exist.
+ */
+const char*
+sdp_attr_get_maxprate (sdp_t *sdp_p, uint16_t level, uint16_t inst_num)
+{
+    sdp_attr_t *found;
+
+    /* maxprate never appears as a capability parameter, hence cap_num 0. */
+    found = sdp_find_attr(sdp_p, level, 0, SDP_ATTR_MAXPRATE, inst_num);
+    if (found == NULL) {
+        if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+            CSFLogError(logTag, "%s Attribute %s, level %u instance %u not found.",
+                        sdp_p->debug_str, sdp_get_attr_name(SDP_ATTR_MAXPRATE),
+                        (unsigned)level, (unsigned)inst_num);
+        }
+        sdp_p->conf_p->num_invalid_param++;
+        return (NULL);
+    }
+
+    return (found->attr.string_val);
+}
+
+/* Function:    sdp_attr_get_t38ratemgmt
+ * Description: Returns the t38ratemgmt value for the given attribute
+ *              instance, or SDP_T38_UNKNOWN_RATE if that instance is
+ *              not defined.
+ * Parameters:  sdp_p     The SDP handle returned by sdp_init_description.
+ *              level     The level to check for the attribute.
+ *              cap_num   The capability number associated with the
+ *                        attribute if any.  If none, should be zero.
+ *              inst_num  The attribute instance number to check.
+ * Returns:     Ratemgmt value.
+ */
+sdp_t38_ratemgmt_e sdp_attr_get_t38ratemgmt (sdp_t *sdp_p, uint16_t level,
+                                             uint8_t cap_num, uint16_t inst_num)
+{
+    sdp_attr_t *found = sdp_find_attr(sdp_p, level, cap_num,
+                                      SDP_ATTR_T38_RATEMGMT, inst_num);
+
+    if (found != NULL) {
+        return (found->attr.t38ratemgmt);
+    }
+
+    if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+        CSFLogError(logTag, "%s t38ratemgmt attribute, level %u instance %u "
+                    "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+    }
+    sdp_p->conf_p->num_invalid_param++;
+    return (SDP_T38_UNKNOWN_RATE);
+}
+
+/* Function: sdp_attr_get_t38udpec
+ * Description: Returns the value of the t38udpec attribute
+ * parameter specified for the given attribute. If the given
+ * attribute is not defined, SDP_T38_UDPEC_UNKNOWN is returned.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: UDP EC value.
+ */
+sdp_t38_udpec_e sdp_attr_get_t38udpec (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num,
+ SDP_ATTR_T38_UDPEC, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s t38udpec attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_T38_UDPEC_UNKNOWN);
+ } else {
+ return (attr_p->attr.t38udpec);
+ }
+}
+
+/* Function: sdp_get_media_direction
+ * Description: Determines the direction defined for a given level. The
+ * direction will be inactive, sendonly, recvonly, or sendrecv
+ * as determined by the last of these attributes specified at
+ * the given level. If none of these attributes are specified,
+ * the direction will be sendrecv by default.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * Returns: An SDP direction enum value.
+ */
+sdp_direction_e sdp_get_media_direction (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num)
+{
+ sdp_mca_t *mca_p;
+ sdp_attr_t *attr_p;
+ sdp_direction_e direction = SDP_DIRECTION_SENDRECV;
+
+ if (cap_num == 0) {
+ /* Find the pointer to the attr list for this level. */
+ if (level == SDP_SESSION_LEVEL) {
+ attr_p = sdp_p->sess_attrs_p;
+ } else { /* Attr is at a media level */
+ mca_p = sdp_find_media_level(sdp_p, level);
+ if (mca_p == NULL) {
+ return (direction);
+ }
+ attr_p = mca_p->media_attrs_p;
+ }
+
+ /* Scan for direction oriented attributes. Last one wins. */
+ for (; attr_p != NULL; attr_p = attr_p->next_p) {
+ if (attr_p->type == SDP_ATTR_INACTIVE) {
+ direction = SDP_DIRECTION_INACTIVE;
+ } else if (attr_p->type == SDP_ATTR_SENDONLY) {
+ direction = SDP_DIRECTION_SENDONLY;
+ } else if (attr_p->type == SDP_ATTR_RECVONLY) {
+ direction = SDP_DIRECTION_RECVONLY;
+ } else if (attr_p->type == SDP_ATTR_SENDRECV) {
+ direction = SDP_DIRECTION_SENDRECV;
+ }
+ }
+ } else {
+ if (sdp_p->debug_flag[SDP_DEBUG_WARNINGS]) {
+ CSFLogDebug(logTag, "%s Warning: Invalid cap_num for media direction.",
+ sdp_p->debug_str);
+ }
+ }
+
+ return (direction);
+}
+
+/* Since there are four different attribute names which all have the same
+ * qos parameters, all of these attributes are accessed through this same
+ * set of APIs. To distinguish between specific attributes, the application
+ * must also pass the attribute type. The attribute must be one of:
+ * SDP_ATTR_QOS, SDP_ATTR_SECURE, SDP_ATTR_X_PC_QOS, and SDP_ATTR_X_QOS.
+ */
+tinybool sdp_validate_qos_attr (sdp_attr_e qos_attr)
+{
+ if ((qos_attr == SDP_ATTR_QOS) ||
+ (qos_attr == SDP_ATTR_SECURE) ||
+ (qos_attr == SDP_ATTR_X_PC_QOS) ||
+ (qos_attr == SDP_ATTR_X_QOS) ||
+ (qos_attr == SDP_ATTR_CURR) ||
+ (qos_attr == SDP_ATTR_DES) ||
+ (qos_attr == SDP_ATTR_CONF)){
+ return (TRUE);
+ } else {
+ return (FALSE);
+ }
+}
+
+/* Function: sdp_attr_get_qos_strength
+ * Description: Returns the value of the qos attribute strength
+ * parameter specified for the given attribute. If the given
+ * attribute is not defined, SDP_QOS_STRENGTH_UNKNOWN is
+ * returned.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * qos_attr The specific type of qos attribute. May be
+ * qos, secure, X-pc-qos, or X-qos.
+ * inst_num The attribute instance number to check.
+ * Returns: Qos strength value.
+ */
+sdp_qos_strength_e sdp_attr_get_qos_strength (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, sdp_attr_e qos_attr, uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+
+ if (sdp_validate_qos_attr(qos_attr) == FALSE) {
+ if (sdp_p->debug_flag[SDP_DEBUG_WARNINGS]) {
+ CSFLogDebug(logTag, "%s Warning: Invalid QOS attribute specified for"
+ "get qos strength.", sdp_p->debug_str);
+ }
+ return (SDP_QOS_STRENGTH_UNKNOWN);
+ }
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, qos_attr, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s %s attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str,
+ sdp_get_attr_name(qos_attr), (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_QOS_STRENGTH_UNKNOWN);
+ } else {
+ switch (qos_attr) {
+ case SDP_ATTR_QOS:
+ return (attr_p->attr.qos.strength);
+ case SDP_ATTR_DES:
+ return (attr_p->attr.des.strength);
+ default:
+ return SDP_QOS_STRENGTH_UNKNOWN;
+
+ }
+ }
+}
+
+/* Function: sdp_attr_get_qos_direction
+ * Description: Returns the value of the qos attribute direction
+ * parameter specified for the given attribute. If the given
+ * attribute is not defined, SDP_QOS_DIR_UNKNOWN is
+ * returned.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * qos_attr The specific type of qos attribute. May be
+ * qos, secure, X-pc-qos, or X-qos.
+ * inst_num The attribute instance number to check.
+ * Returns: Qos direction value.
+ */
+sdp_qos_dir_e sdp_attr_get_qos_direction (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, sdp_attr_e qos_attr, uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+
+ if (sdp_validate_qos_attr(qos_attr) == FALSE) {
+ if (sdp_p->debug_flag[SDP_DEBUG_WARNINGS]) {
+ CSFLogDebug(logTag, "%s Warning: Invalid QOS attribute specified "
+ "for get qos direction.", sdp_p->debug_str);
+ }
+ return (SDP_QOS_DIR_UNKNOWN);
+ }
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, qos_attr, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s %s attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str,
+ sdp_get_attr_name(qos_attr), (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_QOS_DIR_UNKNOWN);
+ } else {
+ switch (qos_attr) {
+ case SDP_ATTR_QOS:
+ return (attr_p->attr.qos.direction);
+ case SDP_ATTR_CURR:
+ return (attr_p->attr.curr.direction);
+ case SDP_ATTR_DES:
+ return (attr_p->attr.des.direction);
+ case SDP_ATTR_CONF:
+ return (attr_p->attr.conf.direction);
+ default:
+ return SDP_QOS_DIR_UNKNOWN;
+
+ }
+ }
+}
+
+/* Function: sdp_attr_get_qos_status_type
+ * Description: Returns the value of the qos attribute status_type
+ * parameter specified for the given attribute. If the given
+ * attribute is not defined, SDP_QOS_STATUS_TYPE_UNKNOWN is
+ * returned.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * qos_attr The specific type of qos attribute. May be
+ * qos, secure, X-pc-qos, or X-qos.
+ * inst_num The attribute instance number to check.
+ * Returns: Qos direction value.
+ */
+sdp_qos_status_types_e sdp_attr_get_qos_status_type (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, sdp_attr_e qos_attr, uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+
+ if (sdp_validate_qos_attr(qos_attr) == FALSE) {
+ if (sdp_p->debug_flag[SDP_DEBUG_WARNINGS]) {
+ CSFLogDebug(logTag, "%s Warning: Invalid QOS attribute specified "
+ "for get qos status_type.", sdp_p->debug_str);
+ }
+ return (SDP_QOS_STATUS_TYPE_UNKNOWN);
+ }
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, qos_attr, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s %s attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str,
+ sdp_get_attr_name(qos_attr), (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_QOS_STATUS_TYPE_UNKNOWN);
+ } else {
+ switch (qos_attr) {
+ case SDP_ATTR_CURR:
+ return (attr_p->attr.curr.status_type);
+ case SDP_ATTR_DES:
+ return (attr_p->attr.des.status_type);
+ case SDP_ATTR_CONF:
+ return (attr_p->attr.conf.status_type);
+ default:
+ return SDP_QOS_STATUS_TYPE_UNKNOWN;
+
+ }
+ }
+}
+
/* Function: sdp_attr_get_qos_confirm
 * Description: Returns the value of the qos attribute confirm
 *              parameter specified for the given attribute. Returns TRUE if
 *              the confirm parameter is specified.
 *              NOTE(review): unlike the other qos-family getters, this one
 *              always reads attr.qos.confirm regardless of which qos_attr
 *              type was passed; for curr/des/conf attributes this reads a
 *              different union member's storage — confirm whether callers
 *              only ever pass SDP_ATTR_QOS here.
 * Parameters:  sdp_p    The SDP handle returned by sdp_init_description.
 *              level    The level to check for the attribute.
 *              cap_num  The capability number associated with the
 *                       attribute if any. If none, should be zero.
 *              qos_attr The specific type of qos attribute. May be
 *                       qos, secure, X-pc-qos, or X-qos.
 *              inst_num The attribute instance number to check.
 * Returns:     Boolean value (FALSE on invalid qos_attr or missing
 *              attribute; the latter bumps num_invalid_param).
 */
tinybool sdp_attr_get_qos_confirm (sdp_t *sdp_p, uint16_t level,
                        uint8_t cap_num, sdp_attr_e qos_attr, uint16_t inst_num)
{
    sdp_attr_t  *attr_p;

    /* Reject attribute types outside the qos family up front. */
    if (sdp_validate_qos_attr(qos_attr) == FALSE) {
        if (sdp_p->debug_flag[SDP_DEBUG_WARNINGS]) {
            CSFLogDebug(logTag, "%s Warning: Invalid QOS attribute specified "
                      "for get qos confirm.", sdp_p->debug_str);
        }
        return (FALSE);
    }
    attr_p = sdp_find_attr(sdp_p, level, cap_num, qos_attr, inst_num);
    if (attr_p == NULL) {
        if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
            CSFLogError(logTag, "%s %s attribute, level %u instance %u "
                      "not found.", sdp_p->debug_str,
                      sdp_get_attr_name(qos_attr), (unsigned)level, (unsigned)inst_num);
        }
        sdp_p->conf_p->num_invalid_param++;
        return (FALSE);
    } else {
        return (attr_p->attr.qos.confirm);
    }
}
+
+/* Function: sdp_attr_get_curr_type
+ * Description: Returns the value of the curr attribute status_type
+ * parameter specified for the given attribute. If the given
+ * attribute is not defined, SDP_CURR_UNKNOWN_TYPE is
+ * returned.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * qos_attr The specific type of qos attribute. May be
+ * qos, secure, X-pc-qos, or X-qos.
+ * inst_num The attribute instance number to check.
+ * Returns: Curr type value.
+ */
+sdp_curr_type_e sdp_attr_get_curr_type (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, sdp_attr_e qos_attr, uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, qos_attr, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s %s attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str,
+ sdp_get_attr_name(qos_attr), (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_CURR_UNKNOWN_TYPE);
+ } else {
+ return (attr_p->attr.curr.type);
+ }
+}
+
+/* Function: sdp_attr_get_des_type
+ * Description: Returns the value of the des attribute status_type
+ * parameter specified for the given attribute. If the given
+ * attribute is not defined, SDP_DES_UNKNOWN_TYPE is
+ * returned.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * qos_attr The specific type of qos attribute. May be
+ * qos, secure, X-pc-qos, or X-qos.
+ * inst_num The attribute instance number to check.
+ * Returns: DES type value.
+ */
+sdp_des_type_e sdp_attr_get_des_type (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, sdp_attr_e qos_attr, uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, qos_attr, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s %s attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str,
+ sdp_get_attr_name(qos_attr), (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_DES_UNKNOWN_TYPE);
+ } else {
+ return (attr_p->attr.des.type);
+ }
+}
+
+/* Function: sdp_attr_get_conf_type
+ * Description: Returns the value of the des attribute status_type
+ * parameter specified for the given attribute. If the given
+ * attribute is not defined, SDP_CONF_UNKNOWN_TYPE is
+ * returned.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * qos_attr The specific type of qos attribute. May be
+ * qos, secure, X-pc-qos, or X-qos.
+ * inst_num The attribute instance number to check.
+ * Returns: CONF type value.
+ */
+sdp_conf_type_e sdp_attr_get_conf_type (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, sdp_attr_e qos_attr, uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, qos_attr, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s %s attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str,
+ sdp_get_attr_name(qos_attr), (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_CONF_UNKNOWN_TYPE);
+ } else {
+ return (attr_p->attr.conf.type);
+ }
+}
+
+/* Function: sdp_attr_get_subnet_nettype
+ * Description: Returns the value of the subnet attribute network type
+ * parameter specified for the given attribute. If the given
+ * attribute is not defined, SDP_NT_INVALID is returned.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: Nettype value.
+ */
+sdp_nettype_e sdp_attr_get_subnet_nettype (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num,
+ SDP_ATTR_SUBNET, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s Subnet attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_NT_INVALID);
+ } else {
+ return (attr_p->attr.subnet.nettype);
+ }
+}
+
+/* Function: sdp_attr_get_subnet_addrtype
+ * Description: Returns the value of the subnet attribute address type
+ * parameter specified for the given attribute. If the given
+ * attribute is not defined, SDP_AT_INVALID is returned.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: Addrtype value.
+ */
+sdp_addrtype_e sdp_attr_get_subnet_addrtype (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num,
+ SDP_ATTR_SUBNET, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s Subnet attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_AT_INVALID);
+ } else {
+ return (attr_p->attr.subnet.addrtype);
+ }
+}
+
+/* Function: sdp_attr_get_subnet_addr
+ * Description: Returns the value of the subnet attribute address
+ * parameter specified for the given attribute. If the given
+ * attribute is not defined, NULL is returned. Value is
+ * returned as a const ptr and so cannot be modified by the
+ * application.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: Pointer to address or NULL.
+ */
+const char *sdp_attr_get_subnet_addr (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num,
+ SDP_ATTR_SUBNET, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s Subnet attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (NULL);
+ } else {
+ return (attr_p->attr.subnet.addr);
+ }
+}
+
+/* Function: sdp_attr_get_subnet_prefix
+ * Description: Returns the value of the subnet attribute prefix
+ * parameter specified for the given attribute. If the given
+ * attribute is not defined, SDP_INVALID_PARAM is returned.
+ * Note that this is value is defined to be (-2) and is
+ * different from the return code SDP_INVALID_PARAMETER.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: Prefix value or SDP_INVALID_PARAM.
+ */
+int32_t sdp_attr_get_subnet_prefix (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num,
+ SDP_ATTR_SUBNET, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s Subnet attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_VALUE);
+ } else {
+ return (attr_p->attr.subnet.prefix);
+ }
+}
+
+/* Function: sdp_attr_rtpmap_payload_valid
+ * Description: Returns true or false depending on whether an rtpmap
+ * attribute was specified with the given payload value
+ * at the given level. If it was, the instance number of
+ * that attribute is returned.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number of the attribute
+ * found is returned via this param.
+ * Returns: TRUE or FALSE.
+ */
+tinybool sdp_attr_rtpmap_payload_valid (sdp_t *sdp_p, uint16_t level, uint8_t cap_num,
+ uint16_t *inst_num, uint16_t payload_type)
+{
+ uint16_t i;
+ sdp_attr_t *attr_p;
+ uint16_t num_instances;
+
+ *inst_num = 0;
+
+ if (sdp_attr_num_instances(sdp_p, level, cap_num,
+ SDP_ATTR_RTPMAP, &num_instances) != SDP_SUCCESS) {
+ return (FALSE);
+ }
+
+ for (i=1; i <= num_instances; i++) {
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_RTPMAP, i);
+ if ((attr_p != NULL) &&
+ (attr_p->attr.transport_map.payload_num == payload_type)) {
+ *inst_num = i;
+ return (TRUE);
+ }
+ }
+
+ return (FALSE);
+}
+
+/* Function: sdp_attr_get_rtpmap_payload_type
+ * Description: Returns the value of the rtpmap attribute payload type
+ * parameter specified for the given attribute. If the given
+ * attribute is not defined, zero is returned.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: Payload type value.
+ */
+uint16_t sdp_attr_get_rtpmap_payload_type (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_RTPMAP, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s rtpmap attribute, level %u instance %u "
+ "not found.",
+ sdp_p->debug_str,
+ (unsigned)level,
+ (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (0);
+ } else {
+ return (attr_p->attr.transport_map.payload_num);
+ }
+}
+
+/* Function: sdp_attr_get_rtpmap_encname
+ * Description: Returns a pointer to the value of the encoding name
+ * parameter specified for the given attribute. Value is
+ * returned as a const ptr and so cannot be modified by the
+ * application. If the given attribute is not defined, NULL
+ * will be returned.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: Codec value or SDP_CODEC_INVALID.
+ */
+const char *sdp_attr_get_rtpmap_encname (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_RTPMAP, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s rtpmap attribute, level %u instance %u "
+ "not found.",
+ sdp_p->debug_str,
+ (unsigned)level,
+ (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (NULL);
+ } else {
+ return (attr_p->attr.transport_map.encname);
+ }
+}
+
+/* Function: sdp_attr_get_rtpmap_clockrate
+ * Description: Returns the value of the rtpmap attribute clockrate
+ * parameter specified for the given attribute. If the given
+ * attribute is not defined, zero is returned.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: Clockrate value.
+ */
+uint32_t sdp_attr_get_rtpmap_clockrate (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_RTPMAP, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s rtpmap attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (0);
+ } else {
+ return (attr_p->attr.transport_map.clockrate);
+ }
+}
+
+/* Function: sdp_attr_get_rtpmap_num_chan
+ * Description: Returns the value of the rtpmap attribute num_chan
+ * parameter specified for the given attribute. If the given
+ * attribute is not defined, zero is returned.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: Number of channels param or zero.
+ */
+uint16_t sdp_attr_get_rtpmap_num_chan (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_RTPMAP, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s rtpmap attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (0);
+ } else {
+ return (attr_p->attr.transport_map.num_chan);
+ }
+}
+
+/* Function: sdp_attr_get_ice_attribute
+ * Description: Returns the value of an ice attribute at a given level
+ *
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * ice_attrib Returns an ice attrib string
+ * Returns:
+ * SDP_SUCCESS Attribute param was set successfully.
+ * SDP_INVALID_SDP_PTR SDP pointer invalid
+ * SDP_INVALID_PARAMETER Specified attribute is not defined.
+ */
+
+sdp_result_e sdp_attr_get_ice_attribute (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, sdp_attr_e sdp_attr, uint16_t inst_num,
+ char **out)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, sdp_attr, inst_num);
+ if (attr_p != NULL) {
+ *out = attr_p->attr.ice_attr;
+ return (SDP_SUCCESS);
+ } else {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s ice attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+}
+
+/* Function: sdp_attr_is_present
+ * Description: Returns a boolean value based on attribute being present or
+ * not
+ *
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * attr_type The attribute type.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * Returns:
+ * Boolean value.
+ */
+
+tinybool sdp_attr_is_present (sdp_t *sdp_p, sdp_attr_e attr_type, uint16_t level,
+ uint8_t cap_num)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, attr_type, 1);
+ if (attr_p != NULL) {
+ return (TRUE);
+ }
+ if (sdp_p->debug_flag[SDP_DEBUG_WARNINGS]) {
+ CSFLogDebug(logTag, "%s Attribute %s, level %u not found.",
+ sdp_p->debug_str, sdp_get_attr_name(attr_type), level);
+ }
+
+ return (FALSE);
+}
+
+
+
+/* Function: sdp_attr_get_rtcp_mux_attribute
+ * Description: Returns the value of an rtcp-mux attribute at a given level
+ *
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * rtcp_mux Returns an rtcp-mux attrib bool
+ * Returns:
+ * SDP_SUCCESS Attribute param was set successfully.
+ * SDP_INVALID_SDP_PTR SDP pointer invalid
+ * SDP_INVALID_PARAMETER Specified attribute is not defined.
+ */
+sdp_result_e sdp_attr_get_rtcp_mux_attribute (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, sdp_attr_e sdp_attr, uint16_t inst_num,
+ tinybool *rtcp_mux)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, sdp_attr, inst_num);
+ if (attr_p != NULL) {
+ *rtcp_mux = attr_p->attr.boolean_val;
+ return (SDP_SUCCESS);
+ } else {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s rtcp-mux attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+}
+
+/* Function: sdp_attr_get_setup_attribute
+ * Description: Returns the value of a setup attribute at a given level
+ *
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * setup_type Returns sdp_setup_type_e enum
+ * Returns:
+ * SDP_SUCCESS Attribute param was set successfully.
+ * SDP_INVALID_SDP_PTR SDP pointer invalid
+ * SDP_INVALID_PARAMETER Specified attribute is not defined.
+ */
+sdp_result_e sdp_attr_get_setup_attribute (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num, sdp_setup_type_e *setup_type)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_SETUP, inst_num);
+ if (!attr_p) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag,
+ "%s setup attribute, level %u instance %u not found.",
+ sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return SDP_INVALID_PARAMETER;
+ }
+
+ *setup_type = attr_p->attr.setup;
+ return SDP_SUCCESS;
+}
+
+/* Function: sdp_attr_get_connection_attribute
+ * Description: Returns the value of a connection attribute at a given level
+ *
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * connection_type Returns sdp_connection_type_e enum
+ * Returns:
+ * SDP_SUCCESS Attribute param was set successfully.
+ * SDP_INVALID_SDP_PTR SDP pointer invalid
+ * SDP_INVALID_PARAMETER Specified attribute is not defined.
+ */
+sdp_result_e sdp_attr_get_connection_attribute (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num, sdp_connection_type_e *connection_type)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_CONNECTION,
+ inst_num);
+ if (!attr_p) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag,
+ "%s setup attribute, level %u instance %u not found.",
+ sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return SDP_INVALID_PARAMETER;
+ }
+
+ *connection_type = attr_p->attr.connection;
+ return SDP_SUCCESS;
+}
+
+/* Function: sdp_attr_get_dtls_fingerprint_attribute
+ * Description: Returns the value of dtls fingerprint attribute at a given level
+ *
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * dtls_fingerprint Returns an dtls fingerprint attrib string
+ * Returns:
+ * SDP_SUCCESS Attribute param was set successfully.
+ * SDP_INVALID_SDP_PTR SDP pointer invalid
+ * SDP_INVALID_PARAMETER Specified attribute is not defined.
+ */
+sdp_result_e sdp_attr_get_dtls_fingerprint_attribute (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, sdp_attr_e sdp_attr, uint16_t inst_num,
+ char **out)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, sdp_attr, inst_num);
+ if (attr_p != NULL) {
+ *out = attr_p->attr.string_val;
+ return (SDP_SUCCESS);
+ } else {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s dtls fingerprint attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+}
+
+/* Function: sdp_attr_sprtmap_payload_valid
+ * Description: Returns true or false depending on whether an sprtmap
+ * attribute was specified with the given payload value
+ * at the given level. If it was, the instance number of
+ * that attribute is returned.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number of the attribute
+ * found is returned via this param.
+ * Returns: TRUE or FALSE.
+ */
+tinybool sdp_attr_sprtmap_payload_valid (sdp_t *sdp_p, uint16_t level, uint8_t cap_num,
+ uint16_t *inst_num, uint16_t payload_type)
+{
+ uint16_t i;
+ sdp_attr_t *attr_p;
+ uint16_t num_instances;
+
+ *inst_num = 0;
+
+ if (sdp_attr_num_instances(sdp_p, level, cap_num,
+ SDP_ATTR_SPRTMAP, &num_instances) != SDP_SUCCESS) {
+ return (FALSE);
+ }
+
+ for (i=1; i <= num_instances; i++) {
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_SPRTMAP, i);
+ if ((attr_p != NULL) &&
+ (attr_p->attr.transport_map.payload_num == payload_type)) {
+ *inst_num = i;
+ return (TRUE);
+ }
+ }
+
+ return (FALSE);
+}
+
+/* Function: sdp_attr_get_sprtmap_payload_type
+ * Description: Returns the value of the sprtmap attribute payload type
+ * parameter specified for the given attribute. If the given
+ * attribute is not defined, zero is returned.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: Payload type value.
+ */
+uint16_t sdp_attr_get_sprtmap_payload_type (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_SPRTMAP, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s sprtmap attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (0);
+ } else {
+ return (attr_p->attr.transport_map.payload_num);
+ }
+}
+
+/* Function: sdp_attr_get_sprtmap_encname
+ * Description: Returns a pointer to the value of the encoding name
+ * parameter specified for the given attribute. Value is
+ * returned as a const ptr and so cannot be modified by the
+ * application. If the given attribute is not defined, NULL
+ * will be returned.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: Codec value or SDP_CODEC_INVALID.
+ */
+const char *sdp_attr_get_sprtmap_encname (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_SPRTMAP, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s sprtmap attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (NULL);
+ } else {
+ return (attr_p->attr.transport_map.encname);
+ }
+}
+
+/* Function: sdp_attr_get_sprtmap_clockrate
+ * Description: Returns the value of the sprtmap attribute clockrate
+ * parameter specified for the given attribute. If the given
+ * attribute is not defined, zero is returned.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: Clockrate value.
+ */
+uint32_t sdp_attr_get_sprtmap_clockrate (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_SPRTMAP, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s sprtmap attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (0);
+ } else {
+ return (attr_p->attr.transport_map.clockrate);
+ }
+}
+
+/* Function: sdp_attr_get_sprtmap_num_chan
+ * Description: Returns the value of the sprtmap attribute num_chan
+ * parameter specified for the given attribute. If the given
+ * attribute is not defined, zero is returned.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: Number of channels param or zero.
+ */
+uint16_t sdp_attr_get_sprtmap_num_chan (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_SPRTMAP, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s sprtmap attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (0);
+ } else {
+ return (attr_p->attr.transport_map.num_chan);
+ }
+}
+
+/* Note: The fmtp attribute formats currently handled are:
+ * fmtp:<payload type> <event>,<event>...
+ * fmtp:<payload_type> [annexa=yes/no] [annexb=yes/no] [bitrate=<value>]
+ * where "value" is a numeric value > 0
+ * where each event is a single number or a range separated
+ * by a '-'.
+ * Example: fmtp:101 1,3-15,20
+ */
+
+/* Function: tinybool sdp_attr_fmtp_valid(sdp_t *sdp_p)
+ * Description: Returns true or false depending on whether an fmtp
+ * attribute was specified with the given payload value
+ * at the given level. If it was, the instance number of
+ * that attribute is returned.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: TRUE or FALSE.
+ */
+tinybool sdp_attr_fmtp_payload_valid (sdp_t *sdp_p, uint16_t level, uint8_t cap_num,
+ uint16_t *inst_num, uint16_t payload_type)
+{
+ uint16_t i;
+ sdp_attr_t *attr_p;
+ uint16_t num_instances;
+
+ if (sdp_attr_num_instances(sdp_p, level, cap_num,
+ SDP_ATTR_FMTP, &num_instances) != SDP_SUCCESS) {
+ return (FALSE);
+ }
+
+ for (i=1; i <= num_instances; i++) {
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP, i);
+ if ((attr_p != NULL) &&
+ (attr_p->attr.fmtp.payload_num == payload_type)) {
+ *inst_num = i;
+ return (TRUE);
+ }
+ }
+
+ return (FALSE);
+}
+
+/* Function: sdp_attr_get_fmtp_payload_type
+ * Description: Returns the value of the fmtp attribute payload type
+ * parameter specified for the given attribute. If the given
+ * attribute is not defined, zero is returned.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: Payload type value.
+ */
+uint16_t sdp_attr_get_fmtp_payload_type (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (0);
+ } else {
+ return (attr_p->attr.fmtp.payload_num);
+ }
+}
+
+
+/* Function: sdp_attr_fmtp_is_range_set
+ * Description: Determines if a range of events is set in an fmtp attribute.
+ * The overall range for events is 0-255.
+ * This will return either FULL_MATCH, PARTIAL_MATCH, or NO_MATCH
+ * depending on whether all, some, or none of the specified
+ * events are defined. If the given attribute is not defined,
+ * NO_MATCH will be returned. It is up to the appl to verify
+ * the validity of the attribute before calling this routine.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * low_val Low value of the range. Range is 0-255.
+ * high_val High value of the range.
+ * Returns: SDP_FULL_MATCH, SDP_PARTIAL_MATCH, SDP_NO_MATCH
+ */
+sdp_ne_res_e sdp_attr_fmtp_is_range_set (sdp_t *sdp_p, uint16_t level, uint8_t cap_num,
+ uint16_t inst_num, uint8_t low_val, uint8_t high_val)
+{
+ uint16_t i;
+ uint32_t mapword;
+ uint32_t bmap;
+ uint32_t num_vals = 0;
+ uint32_t num_vals_set = 0;
+ sdp_attr_t *attr_p;
+ sdp_fmtp_t *fmtp_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_NO_MATCH);
+ }
+
+ fmtp_p = &(attr_p->attr.fmtp);
+ for (i = low_val; i <= high_val; i++) {
+ num_vals++;
+ mapword = i/SDP_NE_BITS_PER_WORD;
+ bmap = SDP_NE_BIT_0 << (i%32);
+ if (fmtp_p->bmap[ mapword ] & bmap) {
+ num_vals_set++;
+ }
+ }
+
+ if (num_vals == num_vals_set) {
+ return (SDP_FULL_MATCH);
+ } else if (num_vals_set == 0) {
+ return (SDP_NO_MATCH);
+ } else {
+ return (SDP_PARTIAL_MATCH);
+ }
+}
+
/* Function: sdp_attr_fmtp_valid
 * Description: Determines the validity of the events in the fmtp.
 *              The overall range for events is 0-255.
 *              The user passes an event list with valid events supported by Appl.
 *              This routine will do a simple AND comparison and report the result.
 *
 *              This will return TRUE if ftmp events are valid, and FALSE otherwise.
 *              It is up to the appl to verify the validity of the attribute
 *              before calling this routine.
 * Parameters:  sdp_p     The SDP handle returned by sdp_init_description.
 *              level     The level to check for the attribute.
 *              cap_num   The capability number associated with the
 *                        attribute if any. If none, should be zero.
 *              inst_num  The attribute instance number to check.
 *              appl_maxval Max event value supported by Appl. Range is 0-255.
 *              evt_array Bitmap containing events supported by application.
 * Returns:     TRUE, FALSE
 */
tinybool
sdp_attr_fmtp_valid(sdp_t *sdp_p, uint16_t level, uint8_t cap_num,
                    uint16_t inst_num, uint16_t appl_maxval, uint32_t* evt_array)
{
    uint16_t i;
    uint32_t mapword;
    sdp_attr_t *attr_p;
    sdp_fmtp_t *fmtp_p;

    attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP, inst_num);
    if (attr_p == NULL) {
        if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
            CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
                        "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
        }
        sdp_p->conf_p->num_invalid_param++;
        return FALSE;
    }

    fmtp_p = &(attr_p->attr.fmtp);

    /* Do quick test. If application max value is lower than fmtp's then error */
    if (fmtp_p->maxval > appl_maxval)
        return FALSE;

    /* Ok, events are within range. Now check that only
     * allowed events have been received
     */
    /* NOTE(review): only the complete bitmap words below appl_maxval are
     * compared; when appl_maxval is not a multiple of SDP_NE_BITS_PER_WORD,
     * events in the trailing partial word are never checked against
     * evt_array. Confirm whether this truncation is intentional. */
    mapword = appl_maxval/SDP_NE_BITS_PER_WORD;
    for (i=0; i<mapword; i++) {
        if (fmtp_p->bmap[i] & ~(evt_array[i])) {
            /* Remote SDP is requesting events not supported by Application */
            return FALSE;
        }
    }
    return TRUE;
}
+
+/* Function: sdp_attr_set_fmtp_payload_type
+ * Description: Sets the value of the fmtp attribute payload type parameter
+ * for the given attribute.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * payload_type New payload type value.
+ * Returns: SDP_SUCCESS Attribute param was set successfully.
+ * SDP_INVALID_PARAMETER Specified attribute is not defined.
+ */
+sdp_result_e sdp_attr_set_fmtp_payload_type (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num,
+ uint16_t payload_num)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ } else {
+ attr_p->attr.fmtp.payload_num = payload_num;
+ return (SDP_SUCCESS);
+ }
+}
+
+/* Function: sdp_attr_get_fmtp_range
+ * Description: Get a range of named events for an fmtp attribute.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * bmap The 8 word data array holding the bitmap
+ * Returns: SDP_SUCCESS
+ */
+sdp_result_e sdp_attr_get_fmtp_range (sdp_t *sdp_p, uint16_t level, uint8_t cap_num,
+ uint16_t inst_num, uint32_t *bmap)
+{
+ sdp_attr_t *attr_p;
+ sdp_fmtp_t *fmtp_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ fmtp_p = &(attr_p->attr.fmtp);
+ memcpy(bmap, fmtp_p->bmap, SDP_NE_NUM_BMAP_WORDS * sizeof(uint32_t) );
+
+ return (SDP_SUCCESS);
+}
+
+/* Function: sdp_attr_clear_fmtp_range
+ * Description: Clear a range of named events for an fmtp attribute. The low
+ * value specified must be <= the high value.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * low_val The low value of the range. Range is 0-255
+ * high_val The high value of the range. May be == low.
+ * Returns: SDP_SUCCESS
+ */
+sdp_result_e sdp_attr_clear_fmtp_range (sdp_t *sdp_p, uint16_t level, uint8_t cap_num,
+ uint16_t inst_num, uint8_t low_val, uint8_t high_val)
+{
+ uint16_t i;
+ uint32_t mapword;
+ uint32_t bmap;
+ sdp_attr_t *attr_p;
+ sdp_fmtp_t *fmtp_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ fmtp_p = &(attr_p->attr.fmtp);
+ for (i = low_val; i <= high_val; i++) {
+ mapword = i/SDP_NE_BITS_PER_WORD;
+ bmap = SDP_NE_BIT_0 << (i%32);
+ fmtp_p->bmap[ mapword ] &= ~bmap;
+ }
+ if (high_val > fmtp_p->maxval) {
+ fmtp_p->maxval = high_val;
+ }
+ return (SDP_SUCCESS);
+}
+
+/* Function: sdp_attr_compare_fmtp_ranges
+ * Description: Compare the named events set of two fmtp attributes. If all
+ * events are the same (either set or not), FULL_MATCH will be
+ * returned. If no events match, NO_MATCH will be returned.
+ * Otherwise PARTIAL_MATCH will be returned. If either attr is
+ * invalid, NO_MATCH will be returned.
+ * Parameters: src_sdp_p The SDP handle returned by sdp_init_description.
+ * dst_sdp_p The SDP handle returned by sdp_init_description.
+ * src_level The level of the src fmtp attribute.
+ * dst_level The level to the dst fmtp attribute.
+ * src_cap_num The capability number of the src attr.
+ * dst_cap_num The capability number of the dst attr.
+ * src_inst_numh The attribute instance of the src attr.
+ * dst_inst_numh The attribute instance of the dst attr.
+ * Returns: SDP_FULL_MATCH, SDP_PARTIAL_MATCH, SDP_NO_MATCH.
+ */
+sdp_ne_res_e sdp_attr_compare_fmtp_ranges (sdp_t *src_sdp_p,sdp_t *dst_sdp_p,
+ uint16_t src_level, uint16_t dst_level,
+ uint8_t src_cap_num, uint8_t dst_cap_num,
+ uint16_t src_inst_num, uint16_t dst_inst_num)
+{
+ uint16_t i,j;
+ uint32_t bmap;
+ uint32_t num_vals_match = 0;
+ sdp_attr_t *src_attr_p;
+ sdp_attr_t *dst_attr_p;
+ sdp_fmtp_t *src_fmtp_p;
+ sdp_fmtp_t *dst_fmtp_p;
+
+ src_attr_p = sdp_find_attr(src_sdp_p, src_level, src_cap_num,
+ SDP_ATTR_FMTP, src_inst_num);
+ dst_attr_p = sdp_find_attr(dst_sdp_p, dst_level, dst_cap_num,
+ SDP_ATTR_FMTP, dst_inst_num);
+ if ((src_attr_p == NULL) || (dst_attr_p == NULL)) {
+ if (src_sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s source or destination fmtp attribute for "
+ "compare not found.", src_sdp_p->debug_str);
+ }
+ src_sdp_p->conf_p->num_invalid_param++;
+ return (SDP_NO_MATCH);
+ }
+
+ src_fmtp_p = &(src_attr_p->attr.fmtp);
+ dst_fmtp_p = &(dst_attr_p->attr.fmtp);
+ for (i = 0; i < SDP_NE_NUM_BMAP_WORDS; i++) {
+ for (j = 0; j < SDP_NE_BITS_PER_WORD; j++) {
+ bmap = SDP_NE_BIT_0 << j;
+ if ((src_fmtp_p->bmap[i] & bmap) && (dst_fmtp_p->bmap[i] & bmap)) {
+ num_vals_match++;
+ } else if ((!(src_fmtp_p->bmap[i] & bmap)) &&
+ (!(dst_fmtp_p->bmap[i] & bmap))) {
+ num_vals_match++;
+ }
+ }
+ }
+
+ if (num_vals_match == (SDP_NE_NUM_BMAP_WORDS * SDP_NE_BITS_PER_WORD)) {
+ return (SDP_FULL_MATCH);
+ } else if (num_vals_match == 0) {
+ return (SDP_NO_MATCH);
+ } else {
+ return (SDP_PARTIAL_MATCH);
+ }
+}
+
+/* Function: sdp_attr_copy_fmtp_ranges
+ * Description: Copy the named events set for one fmtp attribute to another.
+ * Parameters: src_sdp_p The SDP handle returned by sdp_init_description.
+ * dst_sdp_p The SDP handle returned by sdp_init_description.
+ * src_level The level of the src fmtp attribute.
+ * dst_level The level to the dst fmtp attribute.
+ * src_cap_num The capability number of the src attr.
+ * dst_cap_num The capability number of the dst attr.
+ * src_inst_numh The attribute instance of the src attr.
+ * dst_inst_numh The attribute instance of the dst attr.
+ * Returns: SDP_SUCCESS
+ */
+sdp_result_e sdp_attr_copy_fmtp_ranges (sdp_t *src_sdp_p, sdp_t *dst_sdp_p,
+ uint16_t src_level, uint16_t dst_level,
+ uint8_t src_cap_num, uint8_t dst_cap_num,
+ uint16_t src_inst_num, uint16_t dst_inst_num)
+{
+ uint16_t i;
+ sdp_attr_t *src_attr_p;
+ sdp_attr_t *dst_attr_p;
+ sdp_fmtp_t *src_fmtp_p;
+ sdp_fmtp_t *dst_fmtp_p;
+
+ if (!src_sdp_p || !dst_sdp_p) {
+ return (SDP_INVALID_SDP_PTR);
+ }
+
+ src_attr_p = sdp_find_attr(src_sdp_p, src_level, src_cap_num,
+ SDP_ATTR_FMTP, src_inst_num);
+ dst_attr_p = sdp_find_attr(dst_sdp_p, dst_level, dst_cap_num,
+ SDP_ATTR_FMTP, dst_inst_num);
+ if ((src_attr_p == NULL) || (dst_attr_p == NULL)) {
+ if (src_sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s source or destination fmtp attribute for "
+ "copy not found.", src_sdp_p->debug_str);
+ }
+ src_sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ src_fmtp_p = &(src_attr_p->attr.fmtp);
+ dst_fmtp_p = &(dst_attr_p->attr.fmtp);
+ dst_fmtp_p->maxval = src_fmtp_p->maxval;
+ for (i = 0; i < SDP_NE_NUM_BMAP_WORDS; i++) {
+ dst_fmtp_p->bmap[i] = src_fmtp_p->bmap[i];
+ }
+ return (SDP_SUCCESS);
+}
+
+/* Function: sdp_attr_get_fmtp_mode
+ * Description: Gets the value of the fmtp attribute mode parameter
+ * for the given attribute.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * payload_type payload type.
+ * Returns: mode value or zero if mode attribute not found
+ */
+uint32_t sdp_attr_get_fmtp_mode_for_payload_type (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint32_t payload_type)
+{
+ uint16_t num_a_lines = 0;
+ int i;
+ sdp_attr_t *attr_p;
+
+ /*
+ * Get number of FMTP attributes for the AUDIO line
+ */
+ (void) sdp_attr_num_instances(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ &num_a_lines);
+ for (i = 0; i < num_a_lines; i++) {
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP, (uint16_t) (i + 1));
+ if ((attr_p != NULL) &&
+ (attr_p->attr.fmtp.payload_num == (uint16_t)payload_type)) {
+ if (attr_p->attr.fmtp.fmtp_format == SDP_FMTP_MODE) {
+ return attr_p->attr.fmtp.mode;
+ }
+ }
+ }
+ return 0;
+}
+
+sdp_result_e sdp_attr_set_fmtp_max_fs (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num,
+ uint32_t max_fs)
+{
+ sdp_attr_t *attr_p;
+ sdp_fmtp_t *fmtp_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ fmtp_p = &(attr_p->attr.fmtp);
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+
+ if (max_fs > 0) {
+ fmtp_p->max_fs = max_fs;
+ return (SDP_SUCCESS);
+ } else {
+ return (SDP_FAILURE);
+ }
+}
+
+sdp_result_e sdp_attr_set_fmtp_max_fr (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num,
+ uint32_t max_fr)
+{
+ sdp_attr_t *attr_p;
+ sdp_fmtp_t *fmtp_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ fmtp_p = &(attr_p->attr.fmtp);
+ fmtp_p->fmtp_format = SDP_FMTP_CODEC_INFO;
+
+ if (max_fr > 0) {
+ fmtp_p->max_fr = max_fr;
+ return (SDP_SUCCESS);
+ } else {
+ return (SDP_FAILURE);
+ }
+}
+
+/* Function: sdp_attr_get_fmtp_max_average_bitrate
+ * Description: Gets the value of the fmtp attribute- maxaveragebitrate parameter for the OPUS codec
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: max-br value.
+ */
+
+sdp_result_e sdp_attr_get_fmtp_max_average_bitrate (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num, uint32_t* val)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP, 1);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ } else {
+ *val = attr_p->attr.fmtp.maxaveragebitrate;
+ return (SDP_SUCCESS);
+ }
+}
+
+
+/* Function: sdp_attr_get_fmtp_usedtx
+ * Description: Gets the value of the fmtp attribute- usedtx parameter for the OPUS codec
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: usedtx value.
+ */
+
+sdp_result_e sdp_attr_get_fmtp_usedtx (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num, tinybool* val)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ } else {
+ *val = (tinybool)attr_p->attr.fmtp.usedtx;
+ return (SDP_SUCCESS);
+ }
+}
+
/* Function: sdp_attr_get_fmtp_stereo
 * Description: Gets the value of the fmtp attribute- stereo parameter for the OPUS codec
 *              (header previously said "usedtx" -- copy-paste from the
 *              function above; the code reads fmtp.stereo).
 * Parameters: sdp_p The SDP handle returned by sdp_init_description.
 *             level The level to check for the attribute.
 *             cap_num The capability number associated with the
 *                     attribute if any. If none, should be zero.
 *             inst_num The attribute instance number to check.
 *             val Out: receives the stereo flag.
 * Returns: stereo value.
 */

sdp_result_e sdp_attr_get_fmtp_stereo (sdp_t *sdp_p, uint16_t level,
                                       uint8_t cap_num, uint16_t inst_num, tinybool* val)
{
    sdp_attr_t *attr_p;

    attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
                           inst_num);
    if (attr_p == NULL) {
        if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
            CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
                        "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
        }
        sdp_p->conf_p->num_invalid_param++;
        return (SDP_INVALID_PARAMETER);
    } else {
        *val = (tinybool)attr_p->attr.fmtp.stereo;
        return (SDP_SUCCESS);
    }
}
+
+/* Function: sdp_attr_get_fmtp_useinbandfec
+ * Description: Gets the value of the fmtp attribute useinbandfec parameter for the OPUS codec
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: useinbandfec value.
+ */
+
+sdp_result_e sdp_attr_get_fmtp_useinbandfec (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num, tinybool* val)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ } else {
+ *val = (tinybool)attr_p->attr.fmtp.useinbandfec;
+ return (SDP_SUCCESS);
+ }
+}
+
+/* Function: sdp_attr_get_fmtp_maxcodedaudiobandwidth
+ * Description: Gets the value of the fmtp attribute maxcodedaudiobandwidth parameter for OPUS codec
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: maxcodedaudiobandwidth value.
+ */
+char* sdp_attr_get_fmtp_maxcodedaudiobandwidth (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (0);
+ } else {
+ return (attr_p->attr.fmtp.maxcodedaudiobandwidth);
+ }
+}
+
+/* Function: sdp_attr_get_fmtp_cbr
+ * Description: Gets the value of the fmtp attribute cbr parameter for the OPUS codec
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: cbr value.
+ */
+
+sdp_result_e sdp_attr_get_fmtp_cbr (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num, tinybool* val)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ } else {
+ *val = (tinybool)attr_p->attr.fmtp.cbr;
+ return (SDP_SUCCESS);
+ }
+}
+
+uint16_t sdp_attr_get_sctpmap_port(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_SCTPMAP, inst_num);
+ if (!attr_p) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s sctpmap port, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return 0;
+ } else {
+ return attr_p->attr.sctpmap.port;
+ }
+}
+
+sdp_result_e sdp_attr_get_sctpmap_streams (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num, uint32_t* val)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_SCTPMAP, inst_num);
+ if (!attr_p) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s sctpmap streams, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ } else {
+ *val = attr_p->attr.sctpmap.streams;
+ return (SDP_SUCCESS);
+ }
+}
+
+sdp_result_e sdp_attr_get_sctpmap_protocol (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num,
+ char* protocol)
+{
+
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_SCTPMAP,
+ inst_num);
+ if (!attr_p) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s sctpmap, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ } else {
+ sstrncpy(protocol, attr_p->attr.sctpmap.protocol, SDP_MAX_STRING_LEN+1);
+ }
+ return (SDP_SUCCESS);
+}
+
+/* Function: sdp_attr_fmtp_is_annexb_set
+ * Description: Gives the value of the fmtp attribute annexb type parameter
+ * for the given attribute.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ *
+ *
+ * Returns: TRUE or FALSE.
+ */
+tinybool sdp_attr_fmtp_is_annexb_set (sdp_t *sdp_p, uint16_t level, uint8_t cap_num,
+ uint16_t inst_num)
+{
+
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (FALSE);
+ } else {
+ return (attr_p->attr.fmtp.annexb);
+ }
+}
+
+/* Function: sdp_attr_fmtp_is_annexa_set
+ * Description: Gives the value of the fmtp attribute annexa type parameter
+ * for the given attribute.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ *
+ * Returns: TRUE or FALSE. Note FALSE is also returned when the fmtp
+ * attribute instance is not found, so the return value alone
+ * cannot distinguish "annexa off" from "attribute absent".
+ */
+tinybool sdp_attr_fmtp_is_annexa_set (sdp_t *sdp_p, uint16_t level, uint8_t cap_num,
+ uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (FALSE);
+ } else {
+ return (attr_p->attr.fmtp.annexa);
+ }
+}
+
+/* Function: sdp_attr_get_fmtp_bitrate_type
+ * Description: Gets the value of the fmtp attribute bitrate type parameter
+ * for the given attribute.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: Bitrate type value, or SDP_INVALID_VALUE if the fmtp
+ * attribute instance is not found.
+ */
+int32_t sdp_attr_get_fmtp_bitrate_type (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_VALUE);
+ } else {
+ return (attr_p->attr.fmtp.bitrate);
+ }
+}
+
+/* Function: sdp_attr_get_fmtp_qcif
+ * Description: Gets the value of the fmtp attribute QCIF type parameter
+ * for a given Video codec.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: QCIF value, or SDP_INVALID_VALUE if the fmtp attribute
+ * instance is not found.
+ */
+int32_t sdp_attr_get_fmtp_qcif (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_VALUE);
+ } else {
+ return (attr_p->attr.fmtp.qcif);
+ }
+}
+/* Function: sdp_attr_get_fmtp_cif
+ * Description: Gets the value of the fmtp attribute CIF type parameter
+ * for a given Video codec.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: CIF value, or SDP_INVALID_VALUE if the fmtp attribute
+ * instance is not found.
+ */
+int32_t sdp_attr_get_fmtp_cif (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_VALUE);
+ } else {
+ return (attr_p->attr.fmtp.cif);
+ }
+}
+
+
+/* Function: sdp_attr_get_fmtp_sqcif
+ * Description: Gets the value of the fmtp attribute sqcif type parameter
+ * for a given Video codec.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: sqcif value, or SDP_INVALID_VALUE if the fmtp attribute
+ * instance is not found.
+ */
+int32_t sdp_attr_get_fmtp_sqcif (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_VALUE);
+ } else {
+ return (attr_p->attr.fmtp.sqcif);
+ }
+}
+
+/* Function: sdp_attr_get_fmtp_cif4
+ * Description: Gets the value of the fmtp attribute CIF4 type parameter
+ * for a given Video codec.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: CIF4 value, or SDP_INVALID_VALUE if the fmtp attribute
+ * instance is not found.
+ */
+int32_t sdp_attr_get_fmtp_cif4 (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_VALUE);
+ } else {
+ return (attr_p->attr.fmtp.cif4);
+ }
+}
+
+/* Function: sdp_attr_get_fmtp_cif16
+ * Description: Gets the value of the fmtp attribute CIF16 type parameter
+ * for a given Video codec.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: CIF16 value, or SDP_INVALID_VALUE if the fmtp attribute
+ * instance is not found.
+ */
+
+int32_t sdp_attr_get_fmtp_cif16 (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_VALUE);
+ } else {
+ return (attr_p->attr.fmtp.cif16);
+ }
+}
+
+
+/* Function: sdp_attr_get_fmtp_maxbr
+ * Description: Gets the value of the fmtp attribute MAXBR type parameter
+ * for a given Video codec.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: MAXBR value, or SDP_INVALID_VALUE if the fmtp attribute
+ * instance is not found.
+ */
+int32_t sdp_attr_get_fmtp_maxbr (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_VALUE);
+ } else {
+ return (attr_p->attr.fmtp.maxbr);
+ }
+}
+
+/* Function: sdp_attr_get_fmtp_custom_x
+ * Description: Gets the value of the fmtp attribute CUSTOM type parameter
+ * for a given Video codec.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: CUSTOM x value, or SDP_INVALID_VALUE if the fmtp attribute
+ * instance is not found.
+ */
+
+int32_t sdp_attr_get_fmtp_custom_x (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_VALUE);
+ } else {
+ return (attr_p->attr.fmtp.custom_x);
+ }
+}
+/* Function: sdp_attr_get_fmtp_custom_y
+ * Description: Gets the value of the fmtp attribute custom_y type parameter
+ * for a given Video codec.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: CUSTOM Y-AXIS value, or SDP_INVALID_VALUE if the fmtp
+ * attribute instance is not found.
+ */
+
+int32_t sdp_attr_get_fmtp_custom_y (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_VALUE);
+ } else {
+ return (attr_p->attr.fmtp.custom_y);
+ }
+}
+
+/* Function: sdp_attr_get_fmtp_custom_mpi
+ * Description: Gets the value of the fmtp attribute CUSTOM MPI parameter
+ * for a given Video codec.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: CUSTOM MPI value, or SDP_INVALID_VALUE if the fmtp
+ * attribute instance is not found.
+ */
+
+int32_t sdp_attr_get_fmtp_custom_mpi (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_VALUE);
+ } else {
+ return (attr_p->attr.fmtp.custom_mpi);
+ }
+}
+
+/* Function: sdp_attr_get_fmtp_par_width
+ * Description: Gets the value of the fmtp attribute PAR (width) parameter
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: PAR - width value, or SDP_INVALID_VALUE if the fmtp
+ * attribute instance is not found.
+ */
+int32_t sdp_attr_get_fmtp_par_width (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_VALUE);
+ } else {
+ return (attr_p->attr.fmtp.par_width);
+ }
+}
+
+/* Function: sdp_attr_get_fmtp_par_height
+ * Description: Gets the value of the fmtp attribute PAR (height) parameter
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: PAR - height value, or SDP_INVALID_VALUE if the fmtp
+ * attribute instance is not found.
+ */
+int32_t sdp_attr_get_fmtp_par_height (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_VALUE);
+ } else {
+ return (attr_p->attr.fmtp.par_height);
+ }
+}
+
+/* Function: sdp_attr_get_fmtp_cpcf
+ * Description: Gets the value of the fmtp attribute- CPCF parameter
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: CPCF value, or SDP_INVALID_VALUE if the fmtp attribute
+ * instance is not found.
+ */
+int32_t sdp_attr_get_fmtp_cpcf (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_VALUE);
+ } else {
+ return (attr_p->attr.fmtp.cpcf);
+ }
+}
+
+/* Function: sdp_attr_get_fmtp_bpp
+ * Description: Gets the value of the fmtp attribute- BPP parameter
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: BPP value, or SDP_INVALID_VALUE if the fmtp attribute
+ * instance is not found.
+ */
+int32_t sdp_attr_get_fmtp_bpp (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_VALUE);
+ } else {
+ return (attr_p->attr.fmtp.bpp);
+ }
+}
+
+/* Function: sdp_attr_get_fmtp_hrd
+ * Description: Gets the value of the fmtp attribute- HRD parameter
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: HRD value, or SDP_INVALID_VALUE if the fmtp attribute
+ * instance is not found.
+ */
+int32_t sdp_attr_get_fmtp_hrd (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_VALUE);
+ } else {
+ return (attr_p->attr.fmtp.hrd);
+ }
+}
+
+/* Function: sdp_attr_get_fmtp_profile
+ * Description: Gets the value of the fmtp attribute- PROFILE parameter
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: PROFILE value, or SDP_INVALID_VALUE if the fmtp attribute
+ * instance is not found.
+ */
+int32_t sdp_attr_get_fmtp_profile (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_VALUE);
+ } else {
+ return (attr_p->attr.fmtp.profile);
+ }
+}
+
+/* Function: sdp_attr_get_fmtp_level
+ * Description: Gets the value of the fmtp attribute- LEVEL parameter
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: LEVEL value, or SDP_INVALID_VALUE if the fmtp attribute
+ * instance is not found.
+ */
+int32_t sdp_attr_get_fmtp_level (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_VALUE);
+ } else {
+ return (attr_p->attr.fmtp.level);
+ }
+}
+
+/* Function: sdp_attr_get_fmtp_interlace
+ * Description: Checks if INTERLACE parameter is set.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: TRUE if INTERLACE is present and FALSE if INTERLACE is
+ * absent. FALSE is also returned when the fmtp attribute
+ * instance itself is not found.
+ */
+tinybool sdp_attr_get_fmtp_interlace (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return FALSE;
+ } else {
+ return (attr_p->attr.fmtp.is_interlace);
+ }
+}
+
+/* Function: sdp_attr_get_fmtp_pack_mode
+ * Description: Gets the value of the fmtp attribute- packetization-mode parameter for H.264 codec
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * val Output: packetization-mode value in the range 0 - 2.
+ * Set to SDP_DEFAULT_PACKETIZATION_MODE_VALUE when the
+ * (optional) parameter was not specified in the fmtp line.
+ * Returns: SDP_SUCCESS, or SDP_INVALID_PARAMETER if the fmtp
+ * attribute instance is not found (*val is left untouched).
+ */
+
+sdp_result_e sdp_attr_get_fmtp_pack_mode (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num, uint16_t *val)
+{
+
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ } else {
+ if (SDP_INVALID_PACKETIZATION_MODE_VALUE == attr_p->attr.fmtp.packetization_mode) {
+ /* packetization mode unspecified (optional) */
+ *val = SDP_DEFAULT_PACKETIZATION_MODE_VALUE;
+ } else {
+ *val = attr_p->attr.fmtp.packetization_mode;
+ }
+ return (SDP_SUCCESS);
+ }
+}
+
+/* Function: sdp_attr_get_fmtp_level_asymmetry_allowed
+ * Description: Gets the value of the fmtp attribute- level-asymmetry-allowed parameter for H.264 codec
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * val Output: level asymmetry allowed value (0 or 1).
+ * Returns: SDP_SUCCESS, or SDP_INVALID_PARAMETER if the fmtp
+ * attribute instance is not found (*val is left untouched).
+ */
+
+sdp_result_e sdp_attr_get_fmtp_level_asymmetry_allowed (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num, uint16_t *val)
+{
+
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ } else {
+ *val = attr_p->attr.fmtp.level_asymmetry_allowed;
+ return (SDP_SUCCESS);
+ }
+}
+
+/* Function: sdp_attr_get_fmtp_profile_id
+ * Description: Gets the value of the fmtp attribute- profile-level-id parameter for H.264 codec
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: profile-level-id string (owned by the attribute; do not
+ * free), or NULL if the fmtp attribute instance is not found.
+ */
+const char* sdp_attr_get_fmtp_profile_id (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (0);
+ } else {
+ return (attr_p->attr.fmtp.profile_level_id);
+ }
+}
+
+/* Function: sdp_attr_get_fmtp_param_sets
+ * Description: Gets the value of the fmtp attribute- parameter-sets parameter for H.264 codec
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: parameter-sets string (owned by the attribute; do not
+ * free), or NULL if the fmtp attribute instance is not found.
+ */
+const char* sdp_attr_get_fmtp_param_sets (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (0);
+ } else {
+ return (attr_p->attr.fmtp.parameter_sets);
+ }
+}
+
+/* Function: sdp_attr_get_fmtp_interleaving_depth
+ * Description: Gets the value of the fmtp attribute- interleaving_depth parameter for H.264 codec
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * val Output: interleaving_depth value.
+ * Returns: SDP_SUCCESS, or SDP_INVALID_PARAMETER if the fmtp
+ * attribute instance is not found (*val is left untouched).
+ */
+
+sdp_result_e sdp_attr_get_fmtp_interleaving_depth (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num, uint16_t* val)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ } else {
+ *val = attr_p->attr.fmtp.interleaving_depth;
+ return (SDP_SUCCESS);
+ }
+}
+
+/* Function: sdp_attr_get_fmtp_deint_buf_req
+ * Description: Gets the value of the fmtp attribute- deint-buf-req parameter for H.264 codec
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * val Output: deint-buf-req value.
+ * Returns: SDP_SUCCESS with *val set when the deint-buf-req parameter
+ * was present; SDP_FAILURE when the attribute exists but the
+ * parameter was not given (SDP_DEINT_BUF_REQ_FLAG unset);
+ * SDP_INVALID_PARAMETER if the fmtp attribute instance is
+ * not found.
+ */
+
+sdp_result_e sdp_attr_get_fmtp_deint_buf_req (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num,
+ uint32_t *val)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ } else {
+ if (attr_p->attr.fmtp.flag & SDP_DEINT_BUF_REQ_FLAG) {
+ *val = attr_p->attr.fmtp.deint_buf_req;
+ return (SDP_SUCCESS);
+ } else {
+ return (SDP_FAILURE);
+ }
+ }
+}
+
+/* Function: sdp_attr_get_fmtp_max_don_diff
+ * Description: Gets the value of the fmtp attribute- max-don-diff parameter for H.264 codec
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * val Output: max-don-diff value.
+ * Returns: SDP_SUCCESS, or SDP_INVALID_PARAMETER if the fmtp
+ * attribute instance is not found (*val is left untouched).
+ */
+sdp_result_e sdp_attr_get_fmtp_max_don_diff (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num,
+ uint32_t *val)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ } else {
+ *val = attr_p->attr.fmtp.max_don_diff;
+ return (SDP_SUCCESS);
+ }
+}
+
+/* Function: sdp_attr_get_fmtp_init_buf_time
+ * Description: Gets the value of the fmtp attribute- init-buf-time parameter for H.264 codec
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * val Output: init-buf-time value.
+ * Returns: SDP_SUCCESS with *val set when the init-buf-time parameter
+ * was present; SDP_FAILURE when the attribute exists but the
+ * parameter was not given (SDP_INIT_BUF_TIME_FLAG unset);
+ * SDP_INVALID_PARAMETER if the fmtp attribute instance is
+ * not found.
+ */
+sdp_result_e sdp_attr_get_fmtp_init_buf_time (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num,
+ uint32_t *val)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ } else {
+ if (attr_p->attr.fmtp.flag & SDP_INIT_BUF_TIME_FLAG) {
+ *val = attr_p->attr.fmtp.init_buf_time;
+ return (SDP_SUCCESS);
+ } else {
+ return (SDP_FAILURE);
+ }
+ }
+}
+
+/* Function: sdp_attr_get_fmtp_max_mbps
+ * Description: Gets the value of the fmtp attribute- max-mbps parameter for H.264 codec
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * val Output: max-mbps value.
+ * Returns: SDP_SUCCESS, or SDP_INVALID_PARAMETER if the fmtp
+ * attribute instance is not found (*val is left untouched).
+ */
+
+sdp_result_e sdp_attr_get_fmtp_max_mbps (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num,
+ uint32_t *val)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ } else {
+ *val = attr_p->attr.fmtp.max_mbps;
+ return (SDP_SUCCESS);
+ }
+}
+
+/* Function: sdp_attr_get_fmtp_max_fs
+ * Description: Gets the value of the fmtp attribute- max-fs parameter for H.264 codec
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * val Output: max-fs value.
+ * Returns: SDP_SUCCESS, or SDP_INVALID_PARAMETER if the fmtp
+ * attribute instance is not found (*val is left untouched).
+ */
+
+sdp_result_e sdp_attr_get_fmtp_max_fs (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num, uint32_t *val)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ } else {
+ *val = attr_p->attr.fmtp.max_fs;
+ return (SDP_SUCCESS);
+ }
+}
+
+/* Function: sdp_attr_get_fmtp_max_fr
+ * Description: Gets the value of the fmtp attribute- max-fr parameter
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * val Output: max-fr value.
+ * Returns: SDP_SUCCESS, or SDP_INVALID_PARAMETER if the fmtp
+ * attribute instance is not found (*val is left untouched).
+ */
+
+sdp_result_e sdp_attr_get_fmtp_max_fr (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num, uint32_t *val)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ } else {
+ *val = attr_p->attr.fmtp.max_fr;
+ return (SDP_SUCCESS);
+ }
+}
+
+/* Function: sdp_attr_get_fmtp_max_cpb
+ * Description: Gets the value of the fmtp attribute- max-cpb parameter for H.264 codec
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * val Output: max-cpb value.
+ * Returns: SDP_SUCCESS, or SDP_INVALID_PARAMETER if the fmtp
+ * attribute instance is not found (*val is left untouched).
+ */
+
+sdp_result_e sdp_attr_get_fmtp_max_cpb (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num, uint32_t *val)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ } else {
+ *val = attr_p->attr.fmtp.max_cpb;
+ return (SDP_SUCCESS);
+ }
+}
+
+/* Function: sdp_attr_get_fmtp_max_dpb
+ * Description: Gets the value of the fmtp attribute- max-dpb parameter for H.264 codec
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * val Output: max-dpb value.
+ * Returns: SDP_SUCCESS, or SDP_INVALID_PARAMETER if the fmtp
+ * attribute instance is not found (*val is left untouched).
+ */
+
+sdp_result_e sdp_attr_get_fmtp_max_dpb (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num, uint32_t *val)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ } else {
+ *val = attr_p->attr.fmtp.max_dpb;
+ return (SDP_SUCCESS);
+ }
+}
+
+
+/* Function: sdp_attr_get_fmtp_max_br
+ * Description: Gets the value of the fmtp attribute- max-br parameter for H.264 codec
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * val Out: receives the max-br value; written only on success.
+ * Returns: SDP_SUCCESS, or SDP_INVALID_PARAMETER if the fmtp attribute
+ * instance is not found (num_invalid_param is also incremented).
+ */
+
+sdp_result_e sdp_attr_get_fmtp_max_br (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num, uint32_t* val)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ } else {
+ *val = attr_p->attr.fmtp.max_br;
+ return (SDP_SUCCESS);
+ }
+}
+
+/* Function: sdp_attr_fmtp_is_redundant_pic_cap
+ * Description: Gets the value of the fmtp attribute- redundant_pic_cap parameter for H.264 codec
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: The redundant-pic-cap flag. NOTE: FALSE is also returned when
+ * the fmtp attribute instance is not found, so callers cannot
+ * distinguish "absent" from "present but FALSE" here (the error
+ * path does bump num_invalid_param).
+ */
+tinybool sdp_attr_fmtp_is_redundant_pic_cap (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (FALSE);
+ } else {
+ return (attr_p->attr.fmtp.redundant_pic_cap);
+ }
+}
+
+/* Function: sdp_attr_get_fmtp_deint_buf_cap
+ * Description: Gets the value of the fmtp attribute- deint-buf-cap parameter for H.264 codec
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * val Out: receives deint-buf-cap; written only on SDP_SUCCESS.
+ * Returns: SDP_SUCCESS if the parameter was present on the attribute,
+ * SDP_FAILURE if the attribute exists but the parameter was not
+ * parsed (flag bit unset), or SDP_INVALID_PARAMETER if the fmtp
+ * attribute instance is not found.
+ */
+
+sdp_result_e sdp_attr_get_fmtp_deint_buf_cap (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num,
+ uint32_t *val)
+{
+
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ } else {
+ /* Flag bit records whether the parameter appeared in the parsed fmtp. */
+ if (attr_p->attr.fmtp.flag & SDP_DEINT_BUF_CAP_FLAG) {
+ *val = attr_p->attr.fmtp.deint_buf_cap;
+ return (SDP_SUCCESS);
+ } else {
+ return (SDP_FAILURE);
+ }
+ }
+}
+
+/* Function: sdp_attr_get_fmtp_max_rcmd_nalu_size
+ * Description: Gets the value of the fmtp attribute- max-rcmd-nalu-size parameter for H.264 codec
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * val Out: receives max-rcmd-nalu-size; written only on SDP_SUCCESS.
+ * Returns: SDP_SUCCESS if the parameter was present on the attribute,
+ * SDP_FAILURE if the attribute exists but the parameter was not
+ * parsed (flag bit unset), or SDP_INVALID_PARAMETER if the fmtp
+ * attribute instance is not found.
+ */
+sdp_result_e sdp_attr_get_fmtp_max_rcmd_nalu_size (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num,
+ uint32_t *val)
+{
+
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ } else {
+ if (attr_p->attr.fmtp.flag & SDP_MAX_RCMD_NALU_SIZE_FLAG) {
+ *val = attr_p->attr.fmtp.max_rcmd_nalu_size;
+ return (SDP_SUCCESS);
+ } else {
+ return (SDP_FAILURE);
+ }
+ }
+}
+
+/* Function: sdp_attr_fmtp_is_parameter_add
+ * Description: Gets the value of the fmtp attribute- parameter-add for H.264 codec
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: TRUE/FALSE ( parameter-add is boolean). NOTE: FALSE is also
+ * returned when the fmtp attribute instance is not found.
+ */
+tinybool sdp_attr_fmtp_is_parameter_add (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (FALSE);
+ } else {
+ /* Both 1 and SDP_FMTP_UNUSED (parameter not present) should be
+ * treated as TRUE, per RFC 3984, page 45 */
+ return (attr_p->attr.fmtp.parameter_add != 0);
+ }
+}
+
+/****** Following functions are get routines for Annex values
+ * For each Annex support, the get routine will return the boolean TRUE/FALSE
+ * Some annexes for video codecs have defined values. In those cases
+ * (e.g. Annex K, P), the return values are not boolean.
+ *
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: Annex value
+ */
+
+/* Annex D support flag: FALSE if not declared, or if the fmtp attribute
+ * instance is missing (which also increments num_invalid_param). */
+tinybool sdp_attr_get_fmtp_annex_d (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (FALSE);
+ } else {
+ return (attr_p->attr.fmtp.annex_d);
+ }
+}
+
+/* Annex F support flag: FALSE if not declared, or if the fmtp attribute
+ * instance is missing (which also increments num_invalid_param). */
+tinybool sdp_attr_get_fmtp_annex_f (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (FALSE);
+ } else {
+ return (attr_p->attr.fmtp.annex_f);
+ }
+}
+
+/* Annex I support flag: FALSE if not declared, or if the fmtp attribute
+ * instance is missing (which also increments num_invalid_param). */
+tinybool sdp_attr_get_fmtp_annex_i (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (FALSE);
+ } else {
+ return (attr_p->attr.fmtp.annex_i);
+ }
+}
+
+/* Annex J support flag: FALSE if not declared, or if the fmtp attribute
+ * instance is missing (which also increments num_invalid_param). */
+tinybool sdp_attr_get_fmtp_annex_j (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (FALSE);
+ } else {
+ return (attr_p->attr.fmtp.annex_j);
+ }
+}
+
+/* Annex T support flag: FALSE if not declared, or if the fmtp attribute
+ * instance is missing (which also increments num_invalid_param). */
+tinybool sdp_attr_get_fmtp_annex_t (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (FALSE);
+ } else {
+ return (attr_p->attr.fmtp.annex_t);
+ }
+}
+
+/* Annex K value. Returns SDP_INVALID_VALUE when the fmtp attribute instance
+ * is missing — presumably a sentinel outside the legal annex-K range; callers
+ * should verify that against the SDP_INVALID_VALUE definition. */
+int32_t sdp_attr_get_fmtp_annex_k_val (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_VALUE);
+ } else {
+ return (attr_p->attr.fmtp.annex_k_val);
+ }
+}
+
+/* Annex N value. Returns SDP_INVALID_VALUE when the fmtp attribute instance
+ * is missing (num_invalid_param is incremented on that path). */
+int32_t sdp_attr_get_fmtp_annex_n_val (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_VALUE);
+ } else {
+ return (attr_p->attr.fmtp.annex_n_val);
+ }
+}
+
+/* Annex P picture-resize value. Returns SDP_INVALID_VALUE when the fmtp
+ * attribute instance is missing (num_invalid_param is incremented). */
+int32_t sdp_attr_get_fmtp_annex_p_picture_resize (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+
+
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_VALUE);
+ } else {
+ return (attr_p->attr.fmtp.annex_p_val_picture_resize);
+ }
+}
+
+/* Annex P warp value. Returns SDP_INVALID_VALUE when the fmtp attribute
+ * instance is missing (num_invalid_param is incremented). */
+int32_t sdp_attr_get_fmtp_annex_p_warp (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_VALUE);
+ } else {
+ return (attr_p->attr.fmtp.annex_p_val_warp);
+ }
+}
+
+/* Function: sdp_attr_fmtp_get_fmtp_format
+ * Description: Gives the value of the fmtp attribute fmtp_format
+ * type parameter
+ * for the given attribute.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ *
+ *
+ * Returns: Enum type sdp_fmtp_format_type_e; SDP_FMTP_UNKNOWN_TYPE when
+ * the fmtp attribute instance is not found.
+ */
+sdp_fmtp_format_type_e sdp_attr_fmtp_get_fmtp_format (sdp_t *sdp_p,
+ uint16_t level, uint8_t cap_num,
+ uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_FMTP,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s fmtp attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_FMTP_UNKNOWN_TYPE);
+ } else {
+ return (attr_p->attr.fmtp.fmtp_format);
+ }
+}
+
+/* Function: sdp_attr_get_pccodec_num_payload_types
+ * Description: Returns the number of payload types specified for the
+ * given X-pc-codec attribute. If the given attribute is not
+ * defined, zero is returned.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: Number of payload types (0 also doubles as the "attribute
+ * not found" error value).
+ */
+uint16_t sdp_attr_get_pccodec_num_payload_types (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_X_PC_CODEC,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s X-pc-codec attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (0);
+ } else {
+ return (attr_p->attr.pccodec.num_payloads);
+ }
+}
+
+/* Function: sdp_attr_get_pccodec_payload_type
+ * Description: Returns the value of the specified payload type for the
+ * given X-pc-codec attribute. If the given attribute is not
+ * defined, zero is returned.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * payload_num The payload number to get. 1-based; range is (1 -
+ * max num payloads).
+ * Returns: Payload type, or 0 when the attribute is missing or
+ * payload_num is out of range.
+ */
+uint16_t sdp_attr_get_pccodec_payload_type (sdp_t *sdp_p, uint16_t level, uint8_t cap_num,
+ uint16_t inst_num, uint16_t payload_num)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_X_PC_CODEC,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s X-pc-codec attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (0);
+ } else {
+ /* payload_num is 1-based; validate before indexing. */
+ if ((payload_num < 1) ||
+ (payload_num > attr_p->attr.pccodec.num_payloads)) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s X-pc-codec attribute, level %u instance %u, "
+ "invalid payload number %u requested.",
+ sdp_p->debug_str, (unsigned)level, (unsigned)inst_num, (unsigned)payload_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (0);
+ } else {
+ return (attr_p->attr.pccodec.payload_type[payload_num-1]);
+ }
+ }
+}
+
+/* Function: sdp_attr_add_pccodec_payload_type
+ * Description: Add a new value to the list of payload types specified for
+ * the given X-pc-codec attribute. The payload type will be
+ * added to the end of the list so these values should be added
+ * in the order they will be displayed within the attribute.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * payload_type The payload type to add.
+ * Returns: SDP_SUCCESS Payload type was added successfully.
+ * SDP_INVALID_PARAMETER Specified attribute is not defined.
+ */
+sdp_result_e sdp_attr_add_pccodec_payload_type (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num,
+ uint16_t payload_type)
+{
+ uint16_t payload_num;
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_X_PC_CODEC,
+ inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s X-pc-codec attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ } else {
+ /* NOTE(review): no bounds check against the payload_type array's
+ * capacity before the post-increment append — confirm callers
+ * cannot exceed the array size declared for pccodec. */
+ payload_num = attr_p->attr.pccodec.num_payloads++;
+ attr_p->attr.pccodec.payload_type[payload_num] = payload_type;
+ return (SDP_SUCCESS);
+ }
+}
+
+/* Function: sdp_attr_get_xcap_first_cap_num
+ * Description: Gets the first capability number valid for the specified
+ * X-cap attribute instance. If the capability is not
+ * defined, zero is returned.
+ * Note: cap_num is not specified. It must be zero.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the capability.
+ * inst_num The X-cap instance number to check.
+ * Returns: Capability number or zero.
+ */
+uint16_t sdp_attr_get_xcap_first_cap_num (sdp_t *sdp_p, uint16_t level, uint16_t inst_num)
+{
+ uint16_t cap_num=1;
+ uint16_t attr_count=0;
+ sdp_attr_t *attr_p;
+ sdp_mca_t *mca_p;
+
+ /* Capability numbers are assigned sequentially across all X-cap
+ * attributes at this level: each earlier instance consumes one
+ * number per payload it carries. */
+ if (level == SDP_SESSION_LEVEL) {
+ for (attr_p = sdp_p->sess_attrs_p; attr_p != NULL;
+ attr_p = attr_p->next_p) {
+ if (attr_p->type == SDP_ATTR_X_CAP) {
+ attr_count++;
+ if (attr_count == inst_num) {
+ return (cap_num);
+ } else {
+ cap_num += attr_p->attr.cap_p->num_payloads;
+ }
+ }
+ }
+ } else { /* Capability is at a media level */
+ mca_p = sdp_find_media_level(sdp_p, level);
+ if (mca_p == NULL) {
+ sdp_p->conf_p->num_invalid_param++;
+ return (0);
+ }
+ for (attr_p = mca_p->media_attrs_p; attr_p != NULL;
+ attr_p = attr_p->next_p) {
+ if (attr_p->type == SDP_ATTR_X_CAP) {
+ attr_count++;
+ if (attr_count == inst_num) {
+ return (cap_num);
+ } else {
+ cap_num += attr_p->attr.cap_p->num_payloads;
+ }
+ }
+ }
+ } /* Attr is at a media level */
+
+ /* Requested instance was never reached: report and fail with 0. */
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s X-cap attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (0);
+}
+
+/* Function: sdp_attr_get_xcap_media_type
+ * Description: Returns the media type specified for the given X-cap
+ * attribute. If the given attribute is not defined,
+ * SDP_MEDIA_INVALID is returned.
+ * Note: cap_num is not specified. It must be zero.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * inst_num The attribute instance number to check.
+ * Returns: Media type or SDP_MEDIA_INVALID.
+ */
+sdp_media_e sdp_attr_get_xcap_media_type (sdp_t *sdp_p, uint16_t level,
+ uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+ sdp_mca_t *cap_p;
+
+ /* Also guards against an X-cap attr with no parsed capability body. */
+ attr_p = sdp_find_attr(sdp_p, level, 0, SDP_ATTR_X_CAP, inst_num);
+ if ((attr_p == NULL) || (attr_p->attr.cap_p == NULL)) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s X-cap attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_MEDIA_INVALID);
+ } else {
+ cap_p = attr_p->attr.cap_p;
+ return (cap_p->media);
+ }
+}
+
+/* Function: sdp_attr_get_xcap_transport_type
+ * Description: Returns the transport type specified for the given X-cap
+ * attribute. If the given attribute is not defined,
+ * SDP_TRANSPORT_INVALID is returned.
+ * Note: cap_num is not specified. It must be zero.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * inst_num The attribute instance number to check.
+ * Returns: Transport type or SDP_TRANSPORT_INVALID.
+ */
+sdp_transport_e sdp_attr_get_xcap_transport_type (sdp_t *sdp_p, uint16_t level,
+ uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+ sdp_mca_t *cap_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, 0, SDP_ATTR_X_CAP,
+ inst_num);
+ if ((attr_p == NULL) || (attr_p->attr.cap_p == NULL)) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s X-cap attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_TRANSPORT_INVALID);
+ } else {
+ cap_p = attr_p->attr.cap_p;
+ return (cap_p->transport);
+ }
+}
+
+/* Function: sdp_attr_get_xcap_num_payload_types
+ * Description: Returns the number of payload types associated with the
+ * specified X-cap attribute. If the attribute is invalid,
+ * zero will be returned. Application must validate the
+ * attribute line before using this routine.
+ * Note: cap_num is not specified. It must be zero.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * inst_num The attribute instance number to check.
+ * Returns: Number of payload types or zero.
+ */
+uint16_t sdp_attr_get_xcap_num_payload_types (sdp_t *sdp_p, uint16_t level,
+ uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+ sdp_mca_t *cap_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, 0, SDP_ATTR_X_CAP, inst_num);
+ if ((attr_p == NULL) || (attr_p->attr.cap_p == NULL)) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s X-cap attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (0);
+ } else {
+ cap_p = attr_p->attr.cap_p;
+ return (cap_p->num_payloads);
+ }
+}
+
+/* Function: sdp_attr_get_xcap_payload_type
+ * Description: Returns the payload type of the specified payload for the
+ * X-cap attribute line. If the attr line or payload number is
+ * invalid, zero will be returned. Application must validate
+ * the X-cap attr before using this routine.
+ * Note: cap_num is not specified. It must be zero.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * inst_num The attribute instance number to check.
+ * payload_num The payload number to retrieve. 1-based; range is
+ * (1 - max num payloads).
+ * indicator Out: payload indicator; written only on success.
+ * Returns: Payload type or zero.
+ */
+uint16_t sdp_attr_get_xcap_payload_type (sdp_t *sdp_p, uint16_t level,
+ uint16_t inst_num, uint16_t payload_num,
+ sdp_payload_ind_e *indicator)
+{
+ sdp_attr_t *attr_p;
+ sdp_mca_t *cap_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, 0, SDP_ATTR_X_CAP, inst_num);
+ if ((attr_p == NULL) || (attr_p->attr.cap_p == NULL)) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s X-cap attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (0);
+ } else {
+ cap_p = attr_p->attr.cap_p;
+ if ((payload_num < 1) ||
+ (payload_num > cap_p->num_payloads)) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s X-cap attribute, level %u instance %u, "
+ "payload num %u invalid.", sdp_p->debug_str,
+ (unsigned)level, (unsigned)inst_num, (unsigned)payload_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (0);
+ } else {
+ *indicator = cap_p->payload_indicator[payload_num-1];
+ return (cap_p->payload_type[payload_num-1]);
+ }
+ }
+}
+
+
+/* Function: sdp_attr_add_xcap_payload_type
+ * Description: Add a new payload type for the X-cap attribute line
+ * specified. The new payload type will be added at the end
+ * of the payload type list.
+ * Note: cap_num is not specified. It must be zero.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * inst_num The attribute instance number to check.
+ * payload_type The new payload type.
+ * indicator Payload indicator stored alongside the type.
+ * Returns: SDP_SUCCESS or SDP_INVALID_PARAMETER
+ */
+sdp_result_e sdp_attr_add_xcap_payload_type(sdp_t *sdp_p, uint16_t level,
+ uint16_t inst_num, uint16_t payload_type,
+ sdp_payload_ind_e indicator)
+{
+ sdp_attr_t *attr_p;
+ sdp_mca_t *cap_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, 0, SDP_ATTR_X_CAP, inst_num);
+ if ((attr_p == NULL) || (attr_p->attr.cap_p == NULL)) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s X-cap attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ /* NOTE(review): append has no capacity check against the payload
+ * arrays' declared size — confirm callers bound num_payloads. */
+ cap_p = attr_p->attr.cap_p;
+ cap_p->payload_indicator[cap_p->num_payloads] = indicator;
+ cap_p->payload_type[cap_p->num_payloads++] = payload_type;
+ return (SDP_SUCCESS);
+}
+
+/* Function: sdp_attr_get_cdsc_first_cap_num
+ * Description: Gets the first capability number valid for the specified
+ * CDSC attribute instance. If the capability is not
+ * defined, zero is returned.
+ * Note: cap_num is not specified. It must be zero.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the capability.
+ * inst_num The CDSC instance number to check.
+ * Returns: Capability number or zero.
+ */
+uint16_t sdp_attr_get_cdsc_first_cap_num(sdp_t *sdp_p, uint16_t level, uint16_t inst_num)
+{
+ uint16_t cap_num=1;
+ uint16_t attr_count=0;
+ sdp_attr_t *attr_p;
+ sdp_mca_t *mca_p;
+
+ /* Mirrors sdp_attr_get_xcap_first_cap_num: capability numbers are
+ * sequential, each earlier CDSC consuming one per payload. */
+ if (level == SDP_SESSION_LEVEL) {
+ for (attr_p = sdp_p->sess_attrs_p; attr_p != NULL;
+ attr_p = attr_p->next_p) {
+ if (attr_p->type == SDP_ATTR_CDSC) {
+ attr_count++;
+ if (attr_count == inst_num) {
+ return (cap_num);
+ } else {
+ cap_num += attr_p->attr.cap_p->num_payloads;
+ }
+ }
+ }
+ } else { /* Capability is at a media level */
+ mca_p = sdp_find_media_level(sdp_p, level);
+ if (mca_p == NULL) {
+ sdp_p->conf_p->num_invalid_param++;
+ return (0);
+ }
+ for (attr_p = mca_p->media_attrs_p; attr_p != NULL;
+ attr_p = attr_p->next_p) {
+ if (attr_p->type == SDP_ATTR_CDSC) {
+ attr_count++;
+ if (attr_count == inst_num) {
+ return (cap_num);
+ } else {
+ cap_num += attr_p->attr.cap_p->num_payloads;
+ }
+ }
+ }
+ } /* Attr is at a media level */
+
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s CDSC attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (0);
+}
+
+/* Function: sdp_attr_get_cdsc_media_type
+ * Description: Returns the media type specified for the given CDSC
+ * attribute. If the given attribute is not defined,
+ * SDP_MEDIA_INVALID is returned.
+ * Note: cap_num is not specified. It must be zero.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * inst_num The attribute instance number to check.
+ * Returns: Media type or SDP_MEDIA_INVALID.
+ */
+sdp_media_e sdp_attr_get_cdsc_media_type(sdp_t *sdp_p, uint16_t level,
+ uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+ sdp_mca_t *cdsc_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, 0, SDP_ATTR_CDSC, inst_num);
+ if ((attr_p == NULL) || (attr_p->attr.cap_p == NULL)) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s CDSC attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_MEDIA_INVALID);
+ } else {
+ cdsc_p = attr_p->attr.cap_p;
+ return (cdsc_p->media);
+ }
+}
+
+/* Function: sdp_attr_get_cdsc_transport_type
+ * Description: Returns the transport type specified for the given CDSC
+ * attribute. If the given attribute is not defined,
+ * SDP_TRANSPORT_INVALID is returned.
+ * Note: cap_num is not specified. It must be zero.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * inst_num The attribute instance number to check.
+ * Returns: Transport type or SDP_TRANSPORT_INVALID.
+ */
+sdp_transport_e sdp_attr_get_cdsc_transport_type(sdp_t *sdp_p, uint16_t level,
+ uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+ sdp_mca_t *cdsc_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, 0, SDP_ATTR_CDSC,
+ inst_num);
+ if ((attr_p == NULL) || (attr_p->attr.cap_p == NULL)) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s CDSC attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_TRANSPORT_INVALID);
+ } else {
+ cdsc_p = attr_p->attr.cap_p;
+ return (cdsc_p->transport);
+ }
+}
+
+/* Function: sdp_attr_get_cdsc_num_payload_types
+ * Description: Returns the number of payload types associated with the
+ * specified CDSC attribute. If the attribute is invalid,
+ * zero will be returned. Application must validate the
+ * attribute line before using this routine.
+ * Note: cap_num is not specified. It must be zero.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * inst_num The attribute instance number to check.
+ * Returns: Number of payload types or zero.
+ */
+uint16_t sdp_attr_get_cdsc_num_payload_types (sdp_t *sdp_p, uint16_t level,
+ uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+ sdp_mca_t *cdsc_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, 0, SDP_ATTR_CDSC, inst_num);
+ if ((attr_p == NULL) || (attr_p->attr.cap_p == NULL)) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s CDSC attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (0);
+ } else {
+ cdsc_p = attr_p->attr.cap_p;
+ return (cdsc_p->num_payloads);
+ }
+}
+
+/* Function: sdp_attr_get_cdsc_payload_type
+ * Description: Returns the payload type of the specified payload for the
+ * CDSC attribute line. If the attr line or payload number is
+ * invalid, zero will be returned. Application must validate
+ * the CDSC attr before using this routine.
+ * Note: cap_num is not specified. It must be zero.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * inst_num The attribute instance number to check.
+ * payload_num The payload number to retrieve. 1-based; range is
+ * (1 - max num payloads).
+ * indicator Out: payload indicator; written only on success.
+ * Returns: Payload type or zero.
+ */
+uint16_t sdp_attr_get_cdsc_payload_type (sdp_t *sdp_p, uint16_t level,
+ uint16_t inst_num, uint16_t payload_num,
+ sdp_payload_ind_e *indicator)
+{
+ sdp_attr_t *attr_p;
+ sdp_mca_t *cdsc_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, 0, SDP_ATTR_CDSC, inst_num);
+ if ((attr_p == NULL) || (attr_p->attr.cap_p == NULL)) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s CDSC attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (0);
+ } else {
+ cdsc_p = attr_p->attr.cap_p;
+ if ((payload_num < 1) ||
+ (payload_num > cdsc_p->num_payloads)) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s CDSC attribute, level %u instance %u, "
+ "payload num %u invalid.", sdp_p->debug_str,
+ (unsigned)level, (unsigned)inst_num, (unsigned)payload_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (0);
+ } else {
+ *indicator = cdsc_p->payload_indicator[payload_num-1];
+ return (cdsc_p->payload_type[payload_num-1]);
+ }
+ }
+}
+
+/* Function: sdp_attr_add_cdsc_payload_type
+ * Description: Add a new payload type for the CDSC attribute line
+ * specified. The new payload type will be added at the end
+ * of the payload type list.
+ * Note: cap_num is not specified. It must be zero.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * inst_num The attribute instance number to check.
+ * payload_type The new payload type.
+ * indicator Payload indicator stored alongside the type.
+ * Returns: SDP_SUCCESS or SDP_INVALID_PARAMETER
+ */
+sdp_result_e sdp_attr_add_cdsc_payload_type (sdp_t *sdp_p, uint16_t level,
+ uint16_t inst_num, uint16_t payload_type,
+ sdp_payload_ind_e indicator)
+{
+ sdp_attr_t *attr_p;
+ sdp_mca_t *cdsc_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, 0, SDP_ATTR_CDSC, inst_num);
+ if ((attr_p == NULL) || (attr_p->attr.cap_p == NULL)) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s CDSC attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ /* NOTE(review): append has no capacity check against the payload
+ * arrays' declared size — confirm callers bound num_payloads. */
+ cdsc_p = attr_p->attr.cap_p;
+ cdsc_p->payload_indicator[cdsc_p->num_payloads] = indicator;
+ cdsc_p->payload_type[cdsc_p->num_payloads++] = payload_type;
+ return (SDP_SUCCESS);
+}
+
+/* Function: sdp_media_dynamic_payload_valid
+ * Description: Checks if the dynamic payload type passed in is defined
+ * on the media line m_line
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * payload_type Payload type to be checked
+ *
+ * Returns: TRUE or FALSE. Returns TRUE if payload type is defined on the
+ * media line, else returns FALSE
+ */
+
+tinybool sdp_media_dynamic_payload_valid (sdp_t *sdp_p, uint16_t payload_type,
+ uint16_t m_line)
+{
+ uint16_t p_type,m_ptype;
+ ushort num_payload_types;
+ sdp_payload_ind_e ind;
+ tinybool payload_matches = FALSE;
+ tinybool result = TRUE;
+
+ if ((payload_type < SDP_MIN_DYNAMIC_PAYLOAD) ||
+ (payload_type > SDP_MAX_DYNAMIC_PAYLOAD)) {
+ return FALSE;
+ }
+
+ num_payload_types =
+ sdp_get_media_num_payload_types(sdp_p, m_line);
+
+ for(p_type=1; p_type <=num_payload_types;p_type++){
+
+ m_ptype = (uint16_t)sdp_get_media_payload_type(sdp_p,
+ m_line, p_type, &ind);
+ if (payload_type == m_ptype) {
+ payload_matches = TRUE;
+ break;
+ }
+
+ }
+
+ if (!payload_matches) {
+ return FALSE;
+ }
+
+ return (result);
+
+}
+
+/* Function: sdp_attr_get_rtr_confirm
+ * Description: Returns the value of the rtr attribute confirm
+ * parameter specified for the given attribute. Returns TRUE if
+ * the confirm parameter is specified.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: Boolean value.
+ */
+tinybool sdp_attr_get_rtr_confirm (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_RTR, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s %s attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str,
+ sdp_get_attr_name(SDP_ATTR_RTR), (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (FALSE);
+ } else {
+ return (attr_p->attr.rtr.confirm);
+ }
+}
+
+
+
+sdp_mediadir_role_e sdp_attr_get_comediadir_role (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num,
+ SDP_ATTR_DIRECTION, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s Comediadir role attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_MEDIADIR_ROLE_UNKNOWN);
+ } else {
+ return (attr_p->attr.comediadir.role);
+ }
+}
+
+/* Function: sdp_attr_get_silencesupp_enabled
+ * Description: Returns the value of the silencesupp attribute enable
+ * parameter specified for the given attribute. Returns TRUE if
+ * the confirm parameter is specified.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: Boolean value.
+ */
+tinybool sdp_attr_get_silencesupp_enabled (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num,
+ SDP_ATTR_SILENCESUPP, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s silenceSuppEnable attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (FALSE);
+ } else {
+ return (attr_p->attr.silencesupp.enabled);
+ }
+}
+
+/* Function: sdp_attr_get_silencesupp_timer
+ * Description: Returns the value of the silencesupp attribute timer
+ * parameter specified for the given attribute. null_ind
+ * is set to TRUE if no value was specified, but instead the
+ * null "-" value was specified.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: 16-bit timer value
+ * boolean null_ind
+ */
+uint16_t sdp_attr_get_silencesupp_timer (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num,
+ tinybool *null_ind)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num,
+ SDP_ATTR_SILENCESUPP, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s silenceTimer attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (0);
+ } else {
+ *null_ind = attr_p->attr.silencesupp.timer_null;
+ return (attr_p->attr.silencesupp.timer);
+ }
+}
+
/* Function:    sdp_attr_get_silencesupp_pref
 * Description: Returns the silencesupp suppPref parameter of the given
 *              attribute. (The original header described a setter and a
 *              SDP_SUCCESS return; this function is a getter.) If the
 *              attribute is not found, the invalid-parameter counter is
 *              incremented and SDP_SILENCESUPP_PREF_UNKNOWN is returned.
 * Parameters:  sdp_p     The SDP handle returned by sdp_init_description.
 *              level     The level to check for the attribute.
 *              cap_num   The capability number associated with the
 *                        attribute if any. If none, should be zero.
 *              inst_num  The attribute instance number to check.
 * Returns:     silencesupp pref enum, or SDP_SILENCESUPP_PREF_UNKNOWN
 *              when the attribute is not present.
 */
sdp_silencesupp_pref_e sdp_attr_get_silencesupp_pref (sdp_t *sdp_p,
                                                      uint16_t level, uint8_t cap_num,
                                                      uint16_t inst_num)
{
    sdp_attr_t  *attr_p;

    attr_p = sdp_find_attr(sdp_p, level, cap_num,
                           SDP_ATTR_SILENCESUPP, inst_num);
    if (attr_p == NULL) {
        if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
            CSFLogError(logTag, "%s silence suppPref attribute, level %u instance %u "
                      "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
        }
        sdp_p->conf_p->num_invalid_param++;
        return (SDP_SILENCESUPP_PREF_UNKNOWN);
    } else {
        return (attr_p->attr.silencesupp.pref);
    }
}
+
/* Function:    sdp_attr_get_silencesupp_siduse
 * Description: Returns the value of the silencesupp attribute siduse
 *              parameter specified for the given attribute. If the given
 *              attribute is not defined, SDP_SILENCESUPP_SIDUSE_UNKNOWN
 *              is returned (the original comment incorrectly named
 *              SDP_QOS_STRENGTH_UNKNOWN here).
 * Parameters:  sdp_p     The SDP handle returned by sdp_init_description.
 *              level     The level to check for the attribute.
 *              cap_num   The capability number associated with the
 *                        attribute if any. If none, should be zero.
 *              inst_num  The attribute instance number to check.
 * Returns:     silencesupp siduse enum.
 */
sdp_silencesupp_siduse_e sdp_attr_get_silencesupp_siduse (sdp_t *sdp_p,
                                                          uint16_t level,
                                                          uint8_t cap_num,
                                                          uint16_t inst_num)
{
    sdp_attr_t  *attr_p;

    attr_p = sdp_find_attr(sdp_p, level, cap_num,
                           SDP_ATTR_SILENCESUPP, inst_num);
    if (attr_p == NULL) {
        if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
            CSFLogError(logTag, "%s silence sidUse attribute, level %u instance %u "
                      "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
        }
        sdp_p->conf_p->num_invalid_param++;
        return (SDP_SILENCESUPP_SIDUSE_UNKNOWN);
    } else {
        return (attr_p->attr.silencesupp.siduse);
    }
}
+
+/* Function: sdp_attr_get_silencesupp_fxnslevel
+ * Description: Returns the value of the silencesupp attribute fxns
+ * (fixed noise) parameter specified for the given attribute.
+ * null_ind is set to TRUE if no value was specified,
+ * but instead the null "-" value was specified.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: 7-bit fxns value
+ * boolean null_ind
+ */
+uint8_t sdp_attr_get_silencesupp_fxnslevel (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num,
+ tinybool *null_ind)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num,
+ SDP_ATTR_SILENCESUPP, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s silence fxnslevel attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (0);
+ } else {
+ *null_ind = attr_p->attr.silencesupp.fxnslevel_null;
+ return (attr_p->attr.silencesupp.fxnslevel);
+ }
+}
+
+/* Function: sdp_attr_get_mptime_num_intervals
+ * Description: Returns the number of intervals specified for the
+ * given mptime attribute. If the given attribute is not
+ * defined, zero is returned.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: Number of intervals.
+ */
+uint16_t sdp_attr_get_mptime_num_intervals (
+ sdp_t *sdp_p,
+ uint16_t level,
+ uint8_t cap_num,
+ uint16_t inst_num) {
+
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_MPTIME, inst_num);
+ if (attr_p != NULL) {
+ return attr_p->attr.mptime.num_intervals;
+ }
+
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s mptime attribute, level %u instance %u not found.",
+ sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return 0;
+}
+
+/* Function: sdp_attr_get_mptime_interval
+ * Description: Returns the value of the specified interval for the
+ * given mptime attribute. If the given attribute is not
+ * defined, zero is returned.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * interval_num The interval number to get. Range is (1 -
+ * max num payloads).
+ * Returns: Interval.
+ */
+uint16_t sdp_attr_get_mptime_interval (
+ sdp_t *sdp_p,
+ uint16_t level,
+ uint8_t cap_num,
+ uint16_t inst_num,
+ uint16_t interval_num) {
+
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_MPTIME, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s mptime attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return 0;
+ }
+
+ if ((interval_num<1) || (interval_num>attr_p->attr.mptime.num_intervals)) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s mptime attribute, level %u instance %u, "
+ "invalid interval number %u requested.",
+ sdp_p->debug_str, (unsigned)level, (unsigned)inst_num, (unsigned)interval_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return 0;
+ }
+
+ return attr_p->attr.mptime.intervals[interval_num-1];
+}
+
+/* Function: sdp_attr_add_mptime_interval
+ * Description: Add a new value to the list of intervals specified for
+ * the given mptime attribute. The interval will be
+ * added to the end of the list so these values should be added
+ * in the order they will be displayed within the attribute.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * mp_interval The interval to add.
+ * Returns: SDP_SUCCESS Interval was added successfully.
+ * SDP_INVALID_PARAMETER Specified attribute is not defined.
+ * SDP_INVALID_SDP_PTR Supplied SDP pointer is invalid
+ */
+sdp_result_e sdp_attr_add_mptime_interval (
+ sdp_t *sdp_p,
+ uint16_t level,
+ uint8_t cap_num,
+ uint16_t inst_num,
+ uint16_t mp_interval) {
+
+ uint16_t interval_num;
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num, SDP_ATTR_MPTIME, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s mptime attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return SDP_INVALID_PARAMETER;
+ }
+
+ interval_num = attr_p->attr.mptime.num_intervals;
+ if (interval_num>=SDP_MAX_PAYLOAD_TYPES) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s mptime attribute, level %u instance %u "
+ "exceeds maximum length.",
+ sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return SDP_INVALID_PARAMETER;
+ }
+
+ attr_p->attr.mptime.intervals[interval_num] = mp_interval;
+ ++attr_p->attr.mptime.num_intervals;
+ return SDP_SUCCESS;
+}
+
+
+
+/* Function: sdp_get_group_attr
+ * Description: Returns the attribute parameter from the a=group:<>
+ * line. If no attrib has been set ,
+ * SDP_GROUP_ATTR_UNSUPPORTED will be returned.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level SDP_SESSION_LEVEL
+ * Returns: Valid attrib value or SDP_GROUP_ATTR_UNSUPPORTED.
+ */
+sdp_group_attr_e sdp_get_group_attr (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num,
+ SDP_ATTR_GROUP, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s Group (a= group line) attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_GROUP_ATTR_UNSUPPORTED);
+ } else {
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Stream data group attr field is :%s ",
+ sdp_p->debug_str,
+ sdp_get_group_attr_name(attr_p->attr.stream_data.group_attr) );
+ }
+ return (attr_p->attr.stream_data.group_attr);
+ }
+}
+
+/* Function: sdp_get_group_num_id
+ * Description: Returns the number of ids from the a=group:<> line.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level SDP_SESSION_LEVEL
+ * Returns: Num of group ids present or 0 if there is an error.
+ */
+uint16_t sdp_get_group_num_id (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num,
+ SDP_ATTR_GROUP, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s a=group level attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (0);
+ } else {
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Stream data group attr - num of ids is :%u ",
+ sdp_p->debug_str,
+ (unsigned)attr_p->attr.stream_data.num_group_id);
+ }
+ }
+ return (attr_p->attr.stream_data.num_group_id);
+}
+
+/* Function: sdp_get_group_id
+ * Description: Returns the group id from the a=group:<> line.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level SDP_SESSION_LEVEL
+ * id_num Number of the id to retrieve. The range is (1 -
+ * SDP_MAX_GROUP_STREAM_ID)
+ * Returns: Value of the group id at the index specified or
+ * NULL if an error
+ */
+const char* sdp_get_group_id (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num, uint16_t id_num)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num,
+ SDP_ATTR_GROUP, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s a=group level attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (NULL);
+ } else {
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Stream data group attr - num of ids is :%u ",
+ sdp_p->debug_str,
+ (unsigned)attr_p->attr.stream_data.num_group_id);
+ }
+ if ((id_num < 1) || (id_num > attr_p->attr.stream_data.num_group_id)) {
+ return (NULL);
+ }
+ }
+ return (attr_p->attr.stream_data.group_ids[id_num-1]);
+}
+
+/* Function: sdp_attr_get_x_sidin
+ * Description: Returns the attribute parameter from the a=X-sidin:<>
+ * line. If no attrib has been set NULL will be returned.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level media level index
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: Pointer to sidin or NULL.
+ */
+const char* sdp_attr_get_x_sidin (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num,
+ SDP_ATTR_X_SIDIN, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s X-sidin attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (NULL);
+ } else {
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Stream X-sidin attr field is :%s ",
+ sdp_p->debug_str,
+ attr_p->attr.stream_data.x_sidin);
+ }
+ return (attr_p->attr.stream_data.x_sidin);
+ }
+}
+
+/* Function: sdp_attr_get_x_sidout
+ * Description: Returns the attribute parameter from the a=X-sidout:<>
+ * line. If no attrib has been set NULL will be returned.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level media level index
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: Pointer to sidout or NULL.
+ */
+const char* sdp_attr_get_x_sidout (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num,
+ SDP_ATTR_X_SIDOUT, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s X-sidout attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (NULL);
+ } else {
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Stream X-sidout attr field is :%s ",
+ sdp_p->debug_str,
+ attr_p->attr.stream_data.x_sidout);
+ }
+ return (attr_p->attr.stream_data.x_sidout);
+ }
+}
+
+/* Function: sdp_attr_get_x_confid
+ * Description: Returns the attribute parameter from the a=X-confid:<>
+ * line. If no attrib has been set NULL will be returned.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level media level index
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: Pointer to confid or NULL.
+ */
+const char* sdp_attr_get_x_confid (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num,
+ SDP_ATTR_X_CONFID, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s X-confid attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (NULL);
+ } else {
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Stream X-confid attr field is :%s ",
+ sdp_p->debug_str,
+ attr_p->attr.stream_data.x_confid);
+ }
+ return (attr_p->attr.stream_data.x_confid);
+ }
+}
+
+/* Function: sdp_get_source_filter_mode
+ * Description: Gets the filter mode in internal representation
+ * Parameters: sdp_p The SDP handle which contains the attributes
+ * level SDP_SESSION_LEVEL/m-line number
+ * inst_num The attribute instance number
+ * Returns: Filter mode (incl/excl/not present)
+ */
+sdp_src_filter_mode_e
+sdp_get_source_filter_mode (sdp_t *sdp_p, uint16_t level, uint8_t cap_num,
+ uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num,
+ SDP_ATTR_SOURCE_FILTER, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s Source filter attribute, level %u, "
+ "instance %u not found", sdp_p->debug_str,
+ (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_FILTER_MODE_NOT_PRESENT);
+ }
+ return (attr_p->attr.source_filter.mode);
+}
+
+/* Function: sdp_get_filter_destination_attributes
+ * Description: Gets the destination address parameters
+ * Parameters: Network type (optional), destination address type
+ * (optional), and destination address (mandatory) variables
+ * which gets updated.
+ * Returns: SDP_SUCCESS or SDP_INVALID_PARAMETER/SDP_INVALID_SDP_PTR
+ */
+sdp_result_e
+sdp_get_filter_destination_attributes (sdp_t *sdp_p, uint16_t level, uint8_t cap_num,
+ uint16_t inst_num, sdp_nettype_e *nettype,
+ sdp_addrtype_e *addrtype,
+ char *dest_addr)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num,
+ SDP_ATTR_SOURCE_FILTER, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s Source filter attribute, level %u instance %u "
+ "not found", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+ if (nettype) {
+ *nettype = attr_p->attr.source_filter.nettype;
+ }
+ if (addrtype) {
+ *addrtype = attr_p->attr.source_filter.addrtype;
+ }
+ sstrncpy(dest_addr, attr_p->attr.source_filter.dest_addr,
+ SDP_MAX_STRING_LEN+1);
+
+ return (SDP_SUCCESS);
+}
+
+/* Function: sdp_get_filter_source_address_count
+ * Description: Gets the number of source addresses in the list
+ * Parameters: sdp_p The SDP handle which contains the attributes
+ * level SDP_SESSION_LEVEL/m-line number
+ * inst_num The attribute instance number
+ * Returns: Source-list count
+ */
+
+int32_t
+sdp_get_filter_source_address_count (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num,
+ SDP_ATTR_SOURCE_FILTER, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s Source filter attribute, level %u instance %u "
+ "not found", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_VALUE);
+ }
+ return (attr_p->attr.source_filter.num_src_addr);
+}
+
+/* Function: sdp_get_filter_source_address
+ * Description: Gets one of the source address that is indexed by the user
+ * Parameters: sdp_p The SDP handle which contains the attributes
+ * level SDP_SESSION_LEVEL/m-line number
+ * inst_num The attribute instance number
+ * src_addr_id User provided index (value in range between
+ * 0 to (SDP_MAX_SRC_ADDR_LIST-1) which obtains
+ * the source addr corresponding to it.
+ * src_addr The user provided variable which gets updated
+ * with source address corresponding to the index
+ */
+sdp_result_e
+sdp_get_filter_source_address (sdp_t *sdp_p, uint16_t level, uint8_t cap_num,
+ uint16_t inst_num, uint16_t src_addr_id,
+ char *src_addr)
+{
+ sdp_attr_t *attr_p;
+
+ src_addr[0] = '\0';
+
+ if (src_addr_id >= SDP_MAX_SRC_ADDR_LIST) {
+ return (SDP_INVALID_PARAMETER);
+ }
+ attr_p = sdp_find_attr(sdp_p, level, cap_num,
+ SDP_ATTR_SOURCE_FILTER, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s Source filter attribute, level %u instance %u "
+ "not found", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+ if (src_addr_id >= attr_p->attr.source_filter.num_src_addr) {
+ return (SDP_INVALID_PARAMETER);
+ }
+ sstrncpy(src_addr, attr_p->attr.source_filter.src_list[src_addr_id],
+ SDP_MAX_STRING_LEN+1);
+
+ return (SDP_SUCCESS);
+}
+
+sdp_rtcp_unicast_mode_e
+sdp_get_rtcp_unicast_mode(sdp_t *sdp_p, uint16_t level, uint8_t cap_num,
+ uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num,
+ SDP_ATTR_RTCP_UNICAST, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s RTCP Unicast attribute, level %u, "
+ "instance %u not found", sdp_p->debug_str,
+ (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_RTCP_UNICAST_MODE_NOT_PRESENT);
+ }
+ return ((sdp_rtcp_unicast_mode_e)attr_p->attr.u32_val);
+}
+
+
+/* Function: sdp_attr_get_sdescriptions_tag
+ * Description: Returns the value of the sdescriptions tag
+ * parameter specified for the given attribute.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: Tag value or SDP_INVALID_VALUE (-2) if error encountered.
+ */
+
+int32_t
+sdp_attr_get_sdescriptions_tag (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, cap_num,
+ SDP_ATTR_SDESCRIPTIONS, inst_num);
+
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s srtp attribute tag, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return SDP_INVALID_VALUE;
+ } else {
+ return attr_p->attr.srtp_context.tag;
+ }
+}
+
+/* Function: sdp_attr_get_sdescriptions_crypto_suite
+ * Description: Returns the value of the sdescriptions crypto suite
+ * parameter specified for the given attribute. Note that
+ * this is a common api for both version 2 and version 9
+ * sdescriptions. It has no knowledge which version is being
+ * used so it will first try to find if a version 2 sdescriptions
+ * attribute is present. If it is, return the suite. If it's not,
+ * try to find the version 9. This assumes you cannot have both
+ * versions in the same SDP.
+ *
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: SDP_SRTP_UNKNOWN_CRYPTO_SUITE is returned if an error was
+ * encountered otherwise the crypto suite is returned.
+ */
+
+sdp_srtp_crypto_suite_t
+sdp_attr_get_sdescriptions_crypto_suite (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+
+
+ /* Try version 2 first */
+ attr_p = sdp_find_attr(sdp_p, level, cap_num,
+ SDP_ATTR_SRTP_CONTEXT, inst_num);
+
+ if (attr_p == NULL) {
+ /* There's no version 2 so now try version 9 */
+ attr_p = sdp_find_attr(sdp_p, level, cap_num,
+ SDP_ATTR_SDESCRIPTIONS, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s srtp attribute suite, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return SDP_SRTP_UNKNOWN_CRYPTO_SUITE;
+ }
+ }
+
+ return attr_p->attr.srtp_context.suite;
+
+}
+
+/* Function: sdp_attr_get_sdescriptions_key
+ * Description: Returns the value of the sdescriptions master key
+ * parameter specified for the given attribute. Note that
+ * this is a common api for both version 2 and version 9
+ * sdescriptions. It has no knowledge which version is being
+ * used so it will first try to find if a version 2 sdescriptions
+ * attribute is present. If it is, return the key. If it's not,
+ * try to find the version 9. This assumes you cannot have both
+ * versions in the same SDP.
+ *
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: NULL if error encountered or master key salt string
+ */
+
+const char*
+sdp_attr_get_sdescriptions_key (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+
+ /* Try version 2 first */
+ attr_p = sdp_find_attr(sdp_p, level, cap_num,
+ SDP_ATTR_SRTP_CONTEXT, inst_num);
+
+ if (attr_p == NULL) {
+ /* Couldn't find version 2 now try version 9 */
+ attr_p = sdp_find_attr(sdp_p, level, cap_num,
+ SDP_ATTR_SDESCRIPTIONS, inst_num);
+
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s srtp attribute key, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return NULL;
+ }
+ }
+
+ return (char*)attr_p->attr.srtp_context.master_key;
+}
+
+
+/* Function: sdp_attr_get_sdescriptions_salt
+ * Description: Returns the value of the sdescriptions master salt
+ * parameter specified for the given attribute. Note that
+ * this is a common api for both version 2 and version 9
+ * sdescriptions. It has no knowledge which version is being
+ * used so it will first try to find if a version 2 sdescriptions
+ * attribute is present. If it is, return the salt. If it's not,
+ * try to find the version 9. This assumes you cannot have both
+ * versions in the same SDP.
+ *
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: NULL if error encountered or master key salt string
+ */
+
+const char*
+sdp_attr_get_sdescriptions_salt (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+
+ /* Try version 2 first */
+ attr_p = sdp_find_attr(sdp_p, level, cap_num,
+ SDP_ATTR_SRTP_CONTEXT, inst_num);
+
+ if (attr_p == NULL) {
+ /* Couldn't find version 2 now try version 9 */
+ attr_p = sdp_find_attr(sdp_p, level, cap_num,
+ SDP_ATTR_SDESCRIPTIONS, inst_num);
+
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s srtp attribute salt, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return NULL;
+ }
+ }
+
+ return (char*) attr_p->attr.srtp_context.master_salt;
+
+}
+
+
+
+/* Function: sdp_attr_get_sdescriptions_lifetime
+ * Description: Returns the value of the sdescriptions lifetime
+ * parameter specified for the given attribute.Note that
+ * this is a common api for both version 2 and version 9
+ * sdescriptions. It has no knowledge which version is being
+ * used so it will first try to find if a version 2 sdescriptions
+ * attribute is present. If it is, return the lifetime. If it's
+ * not, try to find the version 9. This assumes you cannot have
+ * both versions in the same SDP.
+ *
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: NULL if error encountered or lifetime string
+ */
+
+const char*
+sdp_attr_get_sdescriptions_lifetime (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+
+ /* Try version 2 first. */
+ attr_p = sdp_find_attr(sdp_p, level, cap_num,
+ SDP_ATTR_SRTP_CONTEXT, inst_num);
+
+ if (attr_p == NULL) {
+ /* Couldn't find version 2 now try version 9 */
+ attr_p = sdp_find_attr(sdp_p, level, cap_num,
+ SDP_ATTR_SDESCRIPTIONS, inst_num);
+
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s srtp attribute lifetime, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return NULL;
+ }
+ }
+
+ return (char*)attr_p->attr.srtp_context.master_key_lifetime;
+
+}
+
+/* Function: sdp_attr_get_sdescriptions_mki
+ * Description: Returns the value of the sdescriptions MKI value and length
+ * parameter of the specified attribute instance. Note that
+ * this is a common api for both version 2 and version 9
+ * sdescriptions. It has no knowledge which version is being
+ * used so it will first try to find if a version 2 sdescriptions
+ * attribute is present. If it is, return the MKI. If it's
+ * not, try to find version 9. This assumes you cannot have
+ * both versions in the same SDP.
+ *
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * mki_value application provided pointer that on exit
+ * is set to the MKI value string if one exists.
+ * mki_length application provided pointer that on exit
+ * is set to the MKI length if one exists.
+ * Returns: SDP_SUCCESS no errors encountered otherwise sdp error
+ * based upon the specific error.
+ */
+
+sdp_result_e
+sdp_attr_get_sdescriptions_mki (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num,
+ const char **mki_value,
+ uint16_t *mki_length)
+{
+ sdp_attr_t *attr_p;
+
+ *mki_value = NULL;
+ *mki_length = 0;
+
+ /* Try version 2 first */
+ attr_p = sdp_find_attr(sdp_p, level, cap_num,
+ SDP_ATTR_SRTP_CONTEXT, inst_num);
+
+ if (attr_p == NULL) {
+ /* Couldn't find version 2 now try version 9 */
+ attr_p = sdp_find_attr(sdp_p, level, cap_num,
+ SDP_ATTR_SDESCRIPTIONS, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s srtp attribute MKI, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return SDP_INVALID_PARAMETER;
+ }
+ }
+
+ *mki_value = (char*)attr_p->attr.srtp_context.mki;
+ *mki_length = attr_p->attr.srtp_context.mki_size_bytes;
+ return SDP_SUCCESS;
+
+}
+
+
+/* Function: sdp_attr_get_sdescriptions_session_params
+ * Description: Returns the unparsed session parameters string. Note that
+ * this is a common api for both version 2 and version 9
+ * sdescriptions. It has no knowledge which version is being
+ * used so it will first try to find if a version 2 sdescriptions
+ * attribute is present. If it is, return session parameters. If
+ * it's not, try to find version 9. This assumes you cannot have
+ * both versions in the same SDP.
+ *
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: NULL if no session parameters were received in the sdp,
+ * otherwise returns a pointer to the start of the session
+ * parameters string. Note that the calling function should
+ * not free the returned pointer.
+ */
+
+const char*
+sdp_attr_get_sdescriptions_session_params (sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+
+ /* Try version 2 first */
+ attr_p = sdp_find_attr(sdp_p, level, cap_num,
+ SDP_ATTR_SRTP_CONTEXT, inst_num);
+
+ if (attr_p == NULL) {
+ /* Couldn't find version 2 try version 9 */
+ attr_p = sdp_find_attr(sdp_p, level, cap_num,
+ SDP_ATTR_SDESCRIPTIONS, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s srtp attribute session params, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return NULL;
+ }
+ }
+
+ return attr_p->attr.srtp_context.session_parameters;
+}
+
+
+/* Function: sdp_attr_get_sdescriptions_key_size
+ * Description: Returns the master key size. Note that
+ * this is a common api for both version 2 and version 9
+ * sdescriptions. It has no knowledge which version is being
+ * used so it will first try to find if a version 2 sdescriptions
+ * attribute is present. If it is, return key size. If
+ * it's not, try to find version 9. This assumes you cannot have
+ * both versions in the same SDP.
+ *
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: 0 (SDP_SDESCRIPTIONS_KEY_SIZE_UNKNOWN) if error was
+ * encountered, otherwise key size.
+ */
+
+unsigned char
+sdp_attr_get_sdescriptions_key_size (sdp_t *sdp_p,
+ uint16_t level,
+ uint8_t cap_num,
+ uint16_t inst_num)
+{
+
+ sdp_attr_t *attr_p;
+
+ /* Try version 2 first */
+ attr_p = sdp_find_attr(sdp_p, level, cap_num,
+ SDP_ATTR_SRTP_CONTEXT, inst_num);
+
+ if (attr_p == NULL) {
+ /* Couldn't find version 2 now try version 9 */
+ attr_p = sdp_find_attr(sdp_p, level, cap_num,
+ SDP_ATTR_SDESCRIPTIONS, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s srtp attribute MKI, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return SDP_SDESCRIPTIONS_KEY_SIZE_UNKNOWN;
+ }
+ }
+
+ return attr_p->attr.srtp_context.master_key_size_bytes;
+
+}
+
+
+/* Function: sdp_attr_get_sdescriptions_salt_size
+ * Description: Returns the salt key size. Note that
+ * this is a common api for both version 2 and version 9
+ * sdescriptions. It has no knowledge which version is being
+ * used so it will first try to find if a version 2 sdescriptions
+ * attribute is present. If it is, return salt size. If
+ * it's not, try to find version 9. This assumes you cannot have
+ * both versions in the same SDP.
+ *
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: 0 (SDP_SDESCRIPTIONS_KEY_SIZE_UNKNOWN) if error was
+ * encountered, otherwise salt size.
+ */
+
+unsigned char
+sdp_attr_get_sdescriptions_salt_size (sdp_t *sdp_p,
+ uint16_t level,
+ uint8_t cap_num,
+ uint16_t inst_num)
+{
+
+ sdp_attr_t *attr_p;
+
+ /* Try version 2 first */
+ attr_p = sdp_find_attr(sdp_p, level, cap_num,
+ SDP_ATTR_SRTP_CONTEXT, inst_num);
+
+ if (attr_p == NULL) {
+ /* Couldn't find version 2 now try version 9 */
+ attr_p = sdp_find_attr(sdp_p, level, cap_num,
+ SDP_ATTR_SDESCRIPTIONS, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s srtp attribute MKI, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return SDP_SDESCRIPTIONS_KEY_SIZE_UNKNOWN;
+ }
+ }
+
+ return attr_p->attr.srtp_context.master_salt_size_bytes;
+
+}
+
+
+/* Function: sdp_attr_get_srtp_crypto_selection_flags
+ * Description: Returns the selection flags. Note that
+ * this is a common api for both version 2 and version 9
+ * sdescriptions. It has no knowledge which version is being
+ * used so it will first try to find if a version 2 sdescriptions
+ * attribute is present. If it is, return selection flags. If
+ * it's not, try to find version 9. This assumes you cannot have
+ * both versions in the same SDP.
+ * Currently only necessary for MGCP.
+ *
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * cap_num The capability number associated with the
+ * attribute if any. If none, should be zero.
+ * inst_num The attribute instance number to check.
+ * Returns: 0 (SDP_SRTP_CRYPTO_SELECTION_FLAGS_UNKNOWN) if error was
+ * encountered, otherwise selection flags.
+ */
+
+unsigned long
+sdp_attr_get_srtp_crypto_selection_flags (sdp_t *sdp_p,
+ uint16_t level,
+ uint8_t cap_num,
+ uint16_t inst_num)
+{
+
+
+ sdp_attr_t *attr_p;
+
+ /* Try version 2 first */
+ attr_p = sdp_find_attr(sdp_p, level, cap_num,
+ SDP_ATTR_SRTP_CONTEXT, inst_num);
+
+ if (attr_p == NULL) {
+ /* Couldn't find version 2 now try version 9 */
+ attr_p = sdp_find_attr(sdp_p, level, cap_num,
+ SDP_ATTR_SDESCRIPTIONS, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s srtp attribute MKI, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return SDP_SRTP_CRYPTO_SELECTION_FLAGS_UNKNOWN;
+ }
+ }
+
+ return attr_p->attr.srtp_context.selection_flags;
+
+}
+
+
+
+/* Function: sdp_find_rtcp_fb_attr
+ * Description: Helper to find the nth instance of a rtcp-fb attribute of
+ * the specified feedback type.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * payload_type The payload to get the attribute for
+ * fb_type The feedback type to look for.
+ * inst_num The attribute instance number to check.
+ * Returns: Pointer to the attribute, or NULL if not found.
+ */
+
+sdp_attr_t *
+sdp_find_rtcp_fb_attr (sdp_t *sdp_p,
+ uint16_t level,
+ uint16_t payload_type,
+ sdp_rtcp_fb_type_e fb_type,
+ uint16_t inst_num)
+{
+ uint16_t attr_count=0;
+ sdp_mca_t *mca_p;
+ sdp_attr_t *attr_p;
+
+ mca_p = sdp_find_media_level(sdp_p, level);
+ if (!mca_p) {
+ return (NULL);
+ }
+ for (attr_p = mca_p->media_attrs_p; attr_p; attr_p = attr_p->next_p) {
+ if (attr_p->type == SDP_ATTR_RTCP_FB &&
+ (attr_p->attr.rtcp_fb.payload_num == payload_type ||
+ attr_p->attr.rtcp_fb.payload_num == SDP_ALL_PAYLOADS) &&
+ attr_p->attr.rtcp_fb.feedback_type == fb_type) {
+ attr_count++;
+ if (attr_count == inst_num) {
+ return (attr_p);
+ }
+ }
+ }
+ return NULL;
+}
+
+/* Function: sdp_attr_get_rtcp_fb_ack
+ * Description: Returns the value of the rtcp-fb:...ack attribute
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * payload_type The payload to get the attribute for
+ * inst_num The attribute instance number to check.
+ * Returns: ACK type (SDP_RTCP_FB_ACK_NOT_FOUND if not present)
+ */
+sdp_rtcp_fb_ack_type_e
+sdp_attr_get_rtcp_fb_ack(sdp_t *sdp_p, uint16_t level, uint16_t payload_type, uint16_t inst)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_rtcp_fb_attr(sdp_p, level, payload_type,
+ SDP_RTCP_FB_ACK, inst);
+ if (!attr_p) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s rtcp-fb attribute, level %u, pt %u, "
+ "instance %u not found.", sdp_p->debug_str, (unsigned)level,
+ (unsigned)payload_type, (unsigned)inst);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return SDP_RTCP_FB_ACK_NOT_FOUND;
+ }
+ return (attr_p->attr.rtcp_fb.param.ack);
+}
+
+/* Function: sdp_attr_get_rtcp_fb_nack
+ * Description: Returns the value of the rtcp-fb:...nack attribute
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * payload_type The payload to get the attribute for
+ * inst_num The attribute instance number to check.
+ * Returns: NACK type (SDP_RTCP_FB_NACK_NOT_FOUND if not present)
+ */
+sdp_rtcp_fb_nack_type_e
+sdp_attr_get_rtcp_fb_nack(sdp_t *sdp_p, uint16_t level, uint16_t payload_type, uint16_t inst)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_rtcp_fb_attr(sdp_p, level, payload_type,
+ SDP_RTCP_FB_NACK, inst);
+ if (!attr_p) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s rtcp-fb attribute, level %u, pt %u, "
+ "instance %u not found.", sdp_p->debug_str, (unsigned)level,
+ (unsigned)payload_type, (unsigned)inst);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return SDP_RTCP_FB_NACK_NOT_FOUND;
+ }
+ return (attr_p->attr.rtcp_fb.param.nack);
+}
+
+/* Function: sdp_attr_get_rtcp_fb_trr_int
+ * Description: Returns the value of the rtcp-fb:...trr-int attribute
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * payload_type The payload to get the attribute for
+ * inst_num The attribute instance number to check.
+ * Returns: trr-int interval (0xFFFFFFFF if not found)
+ */
+uint32_t
+sdp_attr_get_rtcp_fb_trr_int(sdp_t *sdp_p, uint16_t level,
+ uint16_t payload_type, uint16_t inst)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_rtcp_fb_attr(sdp_p, level, payload_type,
+ SDP_RTCP_FB_TRR_INT, inst);
+ if (!attr_p) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s rtcp-fb attribute, level %u, pt %u, "
+ "instance %u not found.", sdp_p->debug_str, (unsigned)level,
+ (unsigned)payload_type, (unsigned)inst);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return 0xFFFFFFFF;
+ }
+ return (attr_p->attr.rtcp_fb.param.trr_int);
+}
+
+/* Function: sdp_attr_get_rtcp_fb_remb_enabled
+ * Description: Returns true if rtcp-fb:...goog-remb attribute exists
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * payload_type The payload to get the attribute for
+ * Returns: true if rtcp-fb:...goog-remb exists
+ */
+tinybool
+sdp_attr_get_rtcp_fb_remb_enabled(sdp_t *sdp_p,
+ uint16_t level,
+ uint16_t payload_type)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_rtcp_fb_attr(sdp_p, level, payload_type,
+ SDP_RTCP_FB_REMB,
+ 1); // always check for 1st instance
+ return (attr_p? TRUE : FALSE); // either exists or not
+}
+
+/* Function: sdp_attr_get_rtcp_fb_ccm
+ * Description: Returns the value of the rtcp-fb:...ccm attribute
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * payload_type The payload to get the attribute for
+ * inst_num The attribute instance number to check.
+ * Returns: CCM type (SDP_RTCP_FB_CCM_NOT_FOUND if not present)
+ */
+sdp_rtcp_fb_ccm_type_e
+sdp_attr_get_rtcp_fb_ccm(sdp_t *sdp_p, uint16_t level, uint16_t payload_type, uint16_t inst)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_rtcp_fb_attr(sdp_p, level, payload_type,
+ SDP_RTCP_FB_CCM, inst);
+ if (!attr_p) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s rtcp-fb attribute, level %u, pt %u, "
+ "instance %u not found.", sdp_p->debug_str, (unsigned)level,
+ (unsigned)payload_type, (unsigned)inst);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return SDP_RTCP_FB_CCM_NOT_FOUND;
+ }
+ return (attr_p->attr.rtcp_fb.param.ccm);
+}
+
+/* Function: sdp_attr_set_rtcp_fb_ack
+ * Description: Sets the value of an rtcp-fb:...ack attribute
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to set the attribute.
+ * payload_type The value to set the payload type to for
+ * this attribute. Can be SDP_ALL_PAYLOADS.
+ * inst_num The attribute instance number to check.
+ * type The ack type to indicate
+ * Returns: SDP_SUCCESS Attribute param was set successfully.
+ * SDP_INVALID_PARAMETER Specified attribute is not defined.
+ */
+sdp_result_e
+sdp_attr_set_rtcp_fb_ack(sdp_t *sdp_p, uint16_t level, uint16_t payload_type, uint16_t inst,
+ sdp_rtcp_fb_ack_type_e type)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, 0, SDP_ATTR_RTCP_FB, inst);
+ if (!attr_p) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s rtcp_fb ack attribute, level %u "
+ "instance %u not found.", sdp_p->debug_str, (unsigned)level,
+ (unsigned)inst);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ attr_p->attr.rtcp_fb.payload_num = payload_type;
+ attr_p->attr.rtcp_fb.feedback_type = SDP_RTCP_FB_ACK;
+ attr_p->attr.rtcp_fb.param.ack = type;
+ attr_p->attr.rtcp_fb.extra[0] = '\0';
+ return (SDP_SUCCESS);
+}
+
+
+/* Function: sdp_attr_set_rtcp_fb_nack
+ * Description: Sets the value of an rtcp-fb:...nack attribute
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to set the attribute.
+ * payload_type The value to set the payload type to for
+ * this attribute. Can be SDP_ALL_PAYLOADS.
+ * inst_num The attribute instance number to check.
+ * type The nack type to indicate
+ * Returns: SDP_SUCCESS Attribute param was set successfully.
+ * SDP_INVALID_PARAMETER Specified attribute is not defined.
+ */
+sdp_result_e
+sdp_attr_set_rtcp_fb_nack(sdp_t *sdp_p, uint16_t level, uint16_t payload_type, uint16_t inst,
+ sdp_rtcp_fb_nack_type_e type)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, 0, SDP_ATTR_RTCP_FB, inst);
+ if (!attr_p) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s rtcp_fb nack attribute, level %u "
+ "instance %u not found.", sdp_p->debug_str, (unsigned)level,
+ (unsigned)inst);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ attr_p->attr.rtcp_fb.payload_num = payload_type;
+ attr_p->attr.rtcp_fb.feedback_type = SDP_RTCP_FB_NACK;
+ attr_p->attr.rtcp_fb.param.nack = type;
+ attr_p->attr.rtcp_fb.extra[0] = '\0';
+ return (SDP_SUCCESS);
+}
+
+/* Function: sdp_attr_set_rtcp_fb_trr_int
+ * Description: Sets the value of an rtcp-fb:...trr-int attribute
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to set the attribute.
+ * payload_type The value to set the payload type to for
+ * this attribute. Can be SDP_ALL_PAYLOADS.
+ * inst_num The attribute instance number to check.
+ * interval The interval time to indicate
+ * Returns: SDP_SUCCESS Attribute param was set successfully.
+ * SDP_INVALID_PARAMETER Specified attribute is not defined.
+ */
+sdp_result_e
+sdp_attr_set_rtcp_fb_trr_int(sdp_t *sdp_p, uint16_t level, uint16_t payload_type,
+ uint16_t inst, uint32_t interval)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, 0, SDP_ATTR_RTCP_FB, inst);
+ if (!attr_p) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s rtcp_fb trr-int attribute, level %u "
+ "instance %u not found.", sdp_p->debug_str, (unsigned)level,
+ (unsigned)inst);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ attr_p->attr.rtcp_fb.payload_num = payload_type;
+ attr_p->attr.rtcp_fb.feedback_type = SDP_RTCP_FB_TRR_INT;
+ attr_p->attr.rtcp_fb.param.trr_int = interval;
+ attr_p->attr.rtcp_fb.extra[0] = '\0';
+ return (SDP_SUCCESS);
+}
+
+/* Function: sdp_attr_set_rtcp_fb_remb
+ * Description: Sets the value of an rtcp-fb:...goog-remb attribute
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to set the attribute.
+ * payload_type The value to set the payload type to for
+ * this attribute. Can be SDP_ALL_PAYLOADS.
+ * inst_num The attribute instance number to check.
+ * Returns: SDP_SUCCESS Attribute param was set successfully.
+ * SDP_INVALID_PARAMETER Specified attribute is not defined.
+ */
+sdp_result_e
+sdp_attr_set_rtcp_fb_remb(sdp_t *sdp_p, uint16_t level, uint16_t payload_type,
+ uint16_t inst)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, 0, SDP_ATTR_RTCP_FB, inst);
+ if (!attr_p) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s rtcp_fb goog-remb attribute, level %u "
+ "instance %u not found.", sdp_p->debug_str, (unsigned)level,
+ (unsigned)inst);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ attr_p->attr.rtcp_fb.payload_num = payload_type;
+ attr_p->attr.rtcp_fb.feedback_type = SDP_RTCP_FB_REMB;
+ return (SDP_SUCCESS);
+}
+
+/* Function: sdp_attr_set_rtcp_fb_ccm
+ * Description: Sets the value of an rtcp-fb:...ccm attribute
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to set the attribute.
+ * payload_type The value to set the payload type to for
+ * this attribute. Can be SDP_ALL_PAYLOADS.
+ * inst_num The attribute instance number to check.
+ * type The ccm type to indicate
+ * Returns: SDP_SUCCESS Attribute param was set successfully.
+ * SDP_INVALID_PARAMETER Specified attribute is not defined.
+ */
+sdp_result_e
+sdp_attr_set_rtcp_fb_ccm(sdp_t *sdp_p, uint16_t level, uint16_t payload_type, uint16_t inst,
+ sdp_rtcp_fb_ccm_type_e type)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, 0, SDP_ATTR_RTCP_FB, inst);
+ if (!attr_p) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s rtcp_fb ccm attribute, level %u "
+ "instance %u not found.", sdp_p->debug_str, (unsigned)level,
+ (unsigned)inst);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+ attr_p->attr.rtcp_fb.payload_num = payload_type;
+ attr_p->attr.rtcp_fb.feedback_type = SDP_RTCP_FB_CCM;
+ attr_p->attr.rtcp_fb.param.ccm = type;
+ attr_p->attr.rtcp_fb.extra[0] = '\0';
+ return (SDP_SUCCESS);
+}
+
/* Function:    sdp_attr_get_extmap_uri
 * Description: Returns a pointer to the URI of the extmap attribute.
 *              Value is returned as a const ptr and so cannot be modified
 *              by the application. If the given attribute is not defined,
 *              NULL will be returned.
 * Parameters:  sdp_p      The SDP handle returned by sdp_init_description.
 *              level      The level to check for the attribute.
 *              inst_num   The attribute instance number to check.
 * Returns:     URI string, or NULL if the attribute is not defined.
 */
+const char *sdp_attr_get_extmap_uri(sdp_t *sdp_p, uint16_t level,
+ uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, 0, SDP_ATTR_EXTMAP, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s extmap attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (NULL);
+ } else {
+ return (attr_p->attr.extmap.uri);
+ }
+}
+
+/* Function: sdp_attr_get_extmap_id
+ * Description: Returns the id of the extmap specified for the given
+ * attribute. If the given attribute is not defined, 0xFFFF
+ * will be returned.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * level The level to check for the attribute.
+ * inst_num The attribute instance number to check.
+ * Returns: The id of the extmap attribute.
+ */
+uint16_t sdp_attr_get_extmap_id(sdp_t *sdp_p, uint16_t level,
+ uint16_t inst_num)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, 0, SDP_ATTR_EXTMAP, inst_num);
+ if (attr_p == NULL) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s extmap attribute, level %u instance %u "
+ "not found.", sdp_p->debug_str, (unsigned)level, (unsigned)inst_num);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return 0xFFFF;
+ } else {
+ return (attr_p->attr.extmap.id);
+ }
+}
+
/* Function:    sdp_attr_set_extmap
 * Description: Sets the id and URI of an a=extmap attribute
 * Parameters:  sdp_p  The SDP handle returned by sdp_init_description.
 *              level  The level to set the attribute.
 *              id     The id to set the attribute.
 *              uri    The uri to set the attribute.
 *              inst   The attribute instance number to check.
 * Returns:     SDP_SUCCESS            Attribute param was set successfully.
 *              SDP_INVALID_PARAMETER  Specified attribute is not defined.
 */
+sdp_result_e
+sdp_attr_set_extmap(sdp_t *sdp_p, uint16_t level, uint16_t id, const char* uri, uint16_t inst)
+{
+ sdp_attr_t *attr_p;
+
+ attr_p = sdp_find_attr(sdp_p, level, 0, SDP_ATTR_EXTMAP, inst);
+ if (!attr_p) {
+ if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
+ CSFLogError(logTag, "%s extmap attribute, level %u "
+ "instance %u not found.", sdp_p->debug_str, (unsigned)level,
+ (unsigned)inst);
+ }
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ attr_p->attr.extmap.id = id;
+ sstrncpy(attr_p->attr.extmap.uri, uri, SDP_MAX_STRING_LEN+1);
+ return (SDP_SUCCESS);
+}
+
+const char *sdp_attr_get_msid_identifier(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst)
+{
+ sdp_attr_t *attr_p = sdp_find_attr(sdp_p, level, cap_num,
+ SDP_ATTR_MSID, inst);
+ if (!attr_p) {
+ return NULL;
+ }
+ return attr_p->attr.msid.identifier;
+}
+
+const char *sdp_attr_get_msid_appdata(sdp_t *sdp_p, uint16_t level,
+ uint8_t cap_num, uint16_t inst)
+{
+ sdp_attr_t *attr_p = sdp_find_attr(sdp_p, level, cap_num,
+ SDP_ATTR_MSID, inst);
+ if (!attr_p) {
+ return NULL;
+ }
+ return attr_p->attr.msid.appdata;
+}
diff --git a/media/webrtc/signaling/src/sdp/sipcc/sdp_base64.c b/media/webrtc/signaling/src/sdp/sipcc/sdp_base64.c
new file mode 100644
index 000000000..80f1eb52d
--- /dev/null
+++ b/media/webrtc/signaling/src/sdp/sipcc/sdp_base64.c
@@ -0,0 +1,403 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "sdp_base64.h"
+
+/*
+ * Local definitions for Base64 to Raw table entries.
+ */
+#define INVALID_CHAR 0xFF /* Character not in supported Base64 set */
+#define WHITE_SPACE 0xFE /* Space, tab, newline, etc character */
+#define PADDING 0xFD /* The character '=' */
+
+#define PAD_CHAR '=' /* The character '=' */
+
+/* Maximum length of a base64 encoded line */
+#define MAX_BASE64_LINE_LENGTH 76
+
/*
 * base64_result_table
 * String table for translating base64 error codes into human
 * understandable strings.  Indexed by base64_result_t; must stay in
 * the same order as the enum in sdp_base64.h.
 */
char *base64_result_table[BASE64_RESULT_MAX] =
{
    "Base64 successful",
    "Base64 Buffer Overrun",
    "Base64 Bad Data",
    "Base64 Bad Padding",
    "Base64 Bad Block Size"
};
+
/*
 * base64_to_raw_table
 * Heart of the Base64 decoding algorithm.  Lookup table to convert
 * the Base64 characters (indexed by their ASCII code, 0-127) into
 * their specified 6-bit values.  Invalid characters are marked with
 * 0xFF (INVALID_CHAR), white space characters are marked with 0xFE
 * (WHITE_SPACE), and the special padding character '=' is marked
 * with 0xFD (PADDING).
 */
unsigned char base64_to_raw_table[128] =
{
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, /* 0-9 */
    0xFE, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, /* 10-19 */
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, /* 20-29 */
    0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, /* 30-39 */
    0xFF, 0xFF, 0xFF,   62, 0xFF, 0xFF, 0xFF,   63,   52,   53, /* 40-49 */
      54,   55,   56,   57,   58,   59,   60,   61, 0xFF, 0xFF, /* 50-59 */
    0xFF, 0xFD, 0xFF, 0xFF, 0xFF,    0,    1,    2,    3,    4, /* 60-69 */
       5,    6,    7,    8,    9,   10,   11,   12,   13,   14, /* 70-79 */
      15,   16,   17,   18,   19,   20,   21,   22,   23,   24, /* 80-89 */
      25, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,   26,   27,   28, /* 90-99 */
      29,   30,   31,   32,   33,   34,   35,   36,   37,   38, /* 100-109 */
      39,   40,   41,   42,   43,   44,   45,   46,   47,   48, /* 110-119 */
      49,   50,   51, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF              /* 120-127 */
};

/* Inverse of the table above: maps a 6-bit value (0-63) to its Base64
 * alphabet character, per the standard A-Z a-z 0-9 + / alphabet. */
unsigned char raw_to_base64_table[64] =
{
    'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', /* 0-9 */
    'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', /* 10-19 */
    'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', /* 20-29 */
    'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', /* 30-39 */
    'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', /* 40-49 */
    'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', /* 50-59 */
    '8', '9', '+', '/'                                /* 60-63 */
};
+
/*
 * base64_est_encode_size_bytes
 *
 * DESCRIPTION
 *     Estimates the size of buffer required for holding the result of
 *     encoding data of size raw_size_bytes.
 *
 * PARAMETERS
 *     raw_size_bytes = Estimated size of the un-encoded data in bytes.
 *
 * RETURN VALUE
 *     The size of destination buffer to use for encoding in bytes.
 */
int base64_est_encode_size_bytes (int raw_size_bytes)
{
    /*
     * base64_encode() starts a new line after every
     * MAX_BASE64_LINE_LENGTH (76) *output* characters, which
     * corresponds to (76/4)*3 = 57 *input* bytes per line.  The
     * previous estimate divided by 76 here, undercounting the '\n'
     * characters and allowing base64_encode() to report
     * BASE64_BUFFER_OVERRUN (or worse, for callers that ignore the
     * estimate) on inputs larger than a few KB.
     */
    const int raw_bytes_per_line = (76 / 4) * 3; /* == (MAX_BASE64_LINE_LENGTH/4)*3 */
    int length;

    /*
     * Find the number of bytes needed to represent the data
     * using a 4/3 expansion ratio.  That result must be
     * rounded to the next higher multiple of four to account
     * for padding.  Then add in a term to account for any '\n's
     * added.
     */
    length = ((((raw_size_bytes * 4 + 2) / 3) + 3) & ~(0x3)) +
             raw_size_bytes / raw_bytes_per_line;

    return length;
}
+
/*
 * base64_est_decode_size_bytes
 *
 * DESCRIPTION
 *     Estimates the size of buffer required for holding the result of
 *     decoding data of size base64_size_bytes.
 *
 * PARAMETERS
 *     base64_size_bytes = Estimated size of the Base64 data in bytes.
 *
 * RETURN VALUE
 *     The size of destination buffer to use for decoding in bytes.
 */
int base64_est_decode_size_bytes (int base64_size_bytes)
{
    /* Every 4 Base64 characters decode to 3 raw bytes; round up. */
    return ((base64_size_bytes + 1) * 3) / 4;
}
+
/*
 * base64_encode
 *
 * DESCRIPTION
 *     Encode data pointed to by src into the buffer pointer to by dest
 *     using the Base64 algorithm.
 *
 * NOTE: No trailing '\n' character will be added.
 *
 * NOTE: As per specification, '\n' will be placed every 76 chars.
 *
 * PARAMETERS
 *     src = Pointer to the raw data to base64 encode.
 *     src_bytes = The number of bytes in the src buffer to encode.
 *     dest = Pointer to the destination buffer where the converted data
 *         will reside when complete.
 *     dest_bytes = Initially holds the size of the destination buffer
 *         but at completion holds the number of bytes converted.
 *
 * RETURN VALUE
 *     base64_success if the buffer was successfully converted, the
 *     appropriate error code otherwise.
 *
 *     The dest parameter holds the converted data.
 *
 *     The dest_bytes parameter holds the actual number of bytes converted.
 *
 * NOTE(review): size dest with base64_est_encode_size_bytes(); on any
 * error return, *dest_bytes is left at 0 and dest may be partly written.
 */
base64_result_t base64_encode(unsigned char *src, int src_bytes, unsigned char *dest, int *dest_bytes)
{
    int i, j=0;                 /* i: source index, j: destination index */
    int line_count = 0;         /* output characters on the current line */
    unsigned char index;        /* index into base64 lookup table */
    int smax = src_bytes-2;     /* only do full multiples of 3 */
    int dmax = *dest_bytes;     /* destination maximum */

    *dest_bytes = 0;

    /* Do full groups. Base64 must be done in blocks of 3 src bytes */
    for (i=0; i<smax; i+=3) {
        /* Check to see if newline should be injected before this group */
        if (line_count>=MAX_BASE64_LINE_LENGTH) {
            if (j<dmax){
                dest[j++] = '\n';
            } else {
                return BASE64_BUFFER_OVERRUN;
            }
            line_count = 0;
        }

        line_count += 4; /* each 3-byte group emits 4 output chars */

        /* Need room for all 4 output chars of this group */
        if ((j+3) < dmax) {

            /* Find mapping of upper 6 bits */
            index = (src[i] >> 2) & 0x3F;
            dest[j++] = raw_to_base64_table[index];

            /* bottom 2 bits of first word, high 4 bits of second word */
            index = ((src[i] << 4) & 0x30) | ((src[i+1] >> 4) & 0x0F);
            dest[j++] = raw_to_base64_table[index];

            /* bottom 4 bits of second word, high 2 bits of third word */
            index = ((src[i+1] << 2) & 0x3C) | ((src[i+2] >> 6) & 0x03);
            dest[j++] = raw_to_base64_table[index];

            /* bottom 6 bits of third word */
            index = src[i+2] & 0x3F;
            dest[j++] = raw_to_base64_table[index];
        } else {
            return BASE64_BUFFER_OVERRUN;
        }
    }

    /* Check to see if any more work must be done (1 or 2 leftover bytes) */
    if (i<src_bytes) {

        /* Check to see if a newline should be output */
        if (line_count>=MAX_BASE64_LINE_LENGTH) {
            if (j<dmax){
                dest[j++] = '\n';
            } else {
                return BASE64_BUFFER_OVERRUN;
            }
            line_count = 0;
        }

        line_count += 4;

        /* Must fill another quantum (4 chars incl. '=' padding) */
        if (j+4>dmax) {
            /* No room left in output buffer! */
            return BASE64_BUFFER_OVERRUN;
        }

        /* Find mapping of upper 6 bits */
        index = (src[i] >> 2) & 0x3F;
        dest[j++] = raw_to_base64_table[index];

        /* check for another stragler */
        if ((i+1)<src_bytes) {
            /* two leftover bytes: one '=' pad */
            /* bottom 2 bits of first word, high 4 bits of second word */
            index = ((src[i] << 4) & 0x30) | ((src[i+1] >> 4) & 0x0F);
            dest[j++] = raw_to_base64_table[index];

            /* bottom 4 bits of second word */
            index = (src[i+1] << 2) & 0x3C;
            dest[j++] = raw_to_base64_table[index];
            dest[j++] = PAD_CHAR;
        } else {
            /* one leftover byte: two '=' pads */
            /* bottom 2 bits of first word */
            index = (src[i] << 4) & 0x30;
            dest[j++] = raw_to_base64_table[index];
            dest[j++] = PAD_CHAR;
            dest[j++] = PAD_CHAR;
        }
    }

    *dest_bytes = j;

    return BASE64_SUCCESS;
}
+
+unsigned char base64_decode_get_raw(unsigned char index)
+{
+ /* only have 128 values, MSB must not be set! */
+ if (index >= 128) {
+ return INVALID_CHAR;
+ }
+ return base64_to_raw_table[index];
+}
+
/*
 * base64_decode
 *
 * DESCRIPTION
 *     Decode data pointed to by src into the buffer pointer to by dest
 *     using the Base64 algorithm.
 *
 * PARAMETERS
 *     src = Pointer to the Base64 data to decode.
 *     src_bytes = The number of bytes in the src buffer to decode.
 *     dest = Pointer to the destination buffer where the converted data
 *         will reside when complete.
 *     dest_bytes = Initially holds the size of the destination buffer
 *         but at completion holds the number of bytes converted.
 *
 * RETURN VALUE
 *     base64_success if the buffer was successfully converted, the
 *     appropriate error code otherwise.
 *
 *     The dest parameter holds the converted data.
 *
 *     The dest_bytes parameter holds the actual number of bytes converted.
 *
 * NOTE: White space *before* the padding is skipped, but any byte after
 * the final '=' (including a trailing newline) yields BASE64_BAD_PADDING.
 */
base64_result_t base64_decode(unsigned char *src, int src_bytes, unsigned char *dest, int *dest_bytes)
{
    int i, j = 0;
    int sindex = 0;                    /* Current NON-whitespace source
                                        * index */
    int pad_count=0;                   /* Number of padding characters
                                        * encountered */
    int dest_size_bytes = *dest_bytes; /* Save size of destination buffer */
    unsigned char cindex;              /* The current Base64 character to
                                        * process */
    unsigned char val;                 /* The value of the current Base64
                                        * character */

    *dest_bytes = 0;

    for (i=0; i<src_bytes; i++) {
        cindex = src[i];

        val = base64_decode_get_raw(cindex);
        if (val == INVALID_CHAR) {
            /* Invalid base64 character */
            return BASE64_BAD_DATA;
        }

        if (val == WHITE_SPACE) {
            /* Ignore white space */
            continue;
        }

        if (val == PADDING) {
            /* we must be at the end-finish up */
            pad_count++;
            if (++i<src_bytes) {
                /* can have up to 2 pad chars */
                if (base64_decode_get_raw(src[i]) != PADDING) {
                    return BASE64_BAD_PADDING;
                }

                if (++i<src_bytes) {
                    /* should not have any more padding! */
                    return BASE64_BAD_PADDING;
                }

                pad_count++;
            }

            /* DONE! */
            break;
        }

        /* Determine which portion of the 3 bytes this data will fill.
         * sindex counts non-whitespace base64 chars; each group of 4
         * chars fills 3 output bytes.  j is only advanced once the
         * output byte it indexes is complete, so the overrun checks
         * below run at the point each new byte is started. */
        switch (sindex & 0x3) {
        case 0:
            /* Fill upper 6 bits */
            if (j<dest_size_bytes) {
                dest[j] = val << 2;
            } else {
                return BASE64_BUFFER_OVERRUN;
            }
            break;
        case 1:
            /* Fill Bottom 2 bits */
            dest[j++] |= val >> 4;

            if (j<dest_size_bytes) {
                /* Fill Top 4 bits */
                dest[j] = (val << 4) & 0xF0;
            } else {
                /*
                 * Check to see if there is any more data present.
                 * Next base64 character MUST be a pad character and
                 * the rest of this data MUST be zero.
                 *
                 * If this is not the end of data then a buffer overrun
                 * has occurred
                 */
                if ((val & 0x0F) ||
                    (i+1>=src_bytes) ||
                    (base64_decode_get_raw(src[i+1]) != PADDING)) {
                    return BASE64_BUFFER_OVERRUN;
                }
            }
            break;
        case 2:
            /* Fill Bottom 4 bits */
            dest[j++] |= val >> 2;

            if (j<dest_size_bytes) {
                /* Fill Top 2 bits */
                dest[j] = (val << 6) & 0xC0;
            } else {
                /*
                 * Check to see if there is any more data present.
                 * Next base64 character MUST be a pad character and
                 * the rest of this data MUST be zero.
                 *
                 * If this is not the end of data then a buffer overrun
                 * has occurred
                 */
                if ((val & 0x03) ||
                    (i+1>=src_bytes) ||
                    (base64_decode_get_raw(src[i+1]) != PADDING)) {
                    return BASE64_BUFFER_OVERRUN;
                }
            }
            break;
        case 3:
            /*
             * No need to check for overrun here since the
             * previous case was already checked.  If another
             * group is present then case 0 will check again.
             */

            /* Fill Bottom 6 bits */
            dest[j++] |= val;
            break;
        }
        sindex++;
    }

    /* Check length for multiple of 3 bytes (decoded bytes plus pads
     * must complete whole 4-char/3-byte quanta) */
    if (((j + pad_count)% 3) != 0) {
        return BASE64_BAD_BLOCK_SIZE;
    }

    /* Save off the number of bytes converted */
    *dest_bytes = j;

    return BASE64_SUCCESS;
}
diff --git a/media/webrtc/signaling/src/sdp/sipcc/sdp_base64.h b/media/webrtc/signaling/src/sdp/sipcc/sdp_base64.h
new file mode 100644
index 000000000..e264245b7
--- /dev/null
+++ b/media/webrtc/signaling/src/sdp/sipcc/sdp_base64.h
@@ -0,0 +1,42 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
#ifndef _SDP_BASE64_H_
#define _SDP_BASE64_H_

/*
 * base64_result_t
 * Enumeration of the result codes for Base64 conversion.
 */
typedef enum base64_result_t_ {
    BASE64_INVALID=-1,
    BASE64_SUCCESS=0,
    BASE64_BUFFER_OVERRUN,
    BASE64_BAD_DATA,
    BASE64_BAD_PADDING,
    BASE64_BAD_BLOCK_SIZE,
    BASE64_RESULT_MAX
} base64_result_t;

/* NOTE(review): not referenced by sdp_base64.c itself -- confirm
 * external users before removing. */
#define MAX_BASE64_STRING_LEN 60

/* Result code string table, indexed by non-negative base64_result_t
 * values; defined in sdp_base64.c and kept in enum order. */
extern char *base64_result_table[];

/*
 * BASE64_RESULT_TO_STRING
 * Macro to convert a Base64 result code into a human readable string.
 */
#define BASE64_RESULT_TO_STRING(_result) (((_result)>=0 && (_result)<BASE64_RESULT_MAX)?(base64_result_table[_result]):("UNKNOWN Result Code"))

/* Prototypes */

/* Estimated destination buffer sizes for encode/decode. */
int base64_est_encode_size_bytes(int raw_size_bytes);
int base64_est_decode_size_bytes(int base64_size_bytes);

/* Encode src (src_bytes long) into dest; *dest_bytes holds the buffer
 * capacity on input and the number of bytes written on success. */
base64_result_t base64_encode(unsigned char *src, int src_bytes, unsigned char *dest, int *dest_bytes);

/* Decode src (src_bytes long) into dest; *dest_bytes holds the buffer
 * capacity on input and the number of bytes written on success. */
base64_result_t base64_decode(unsigned char *src, int src_bytes, unsigned char *dest, int *dest_bytes);

#endif /* _SDP_BASE64_H_ */
diff --git a/media/webrtc/signaling/src/sdp/sipcc/sdp_config.c b/media/webrtc/signaling/src/sdp/sipcc/sdp_config.c
new file mode 100644
index 000000000..74ccf6b32
--- /dev/null
+++ b/media/webrtc/signaling/src/sdp/sipcc/sdp_config.c
@@ -0,0 +1,241 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "sdp_os_defs.h"
+#include "sdp.h"
+#include "sdp_private.h"
+
+#include "CSFLog.h"
+
+static const char* logTag = "sdp_config";
+
+/* Function: void *sdp_init_config()
+ * Description: Initialize SDP configuration structure with the
+ * following defaults:
+ * All debug levels turned OFF.
+ * All token lines required per RFC2327.
+ * No media types supported.
+ * No network types supported.
+ * No address types supported.
+ * No transport types supported.
+ * Parameters: None.
+ * Returns: A handle for the configuration as a void ptr.
+ */
+sdp_conf_options_t *sdp_init_config ()
+{
+ int i;
+ sdp_conf_options_t *conf_p;
+
+ conf_p = SDP_MALLOC(sizeof(sdp_conf_options_t));
+
+ if (!conf_p) {
+ CSFLogError(logTag, "SDP: could not allocate configuration object.");
+ return NULL;
+ }
+
+ /* Set default debug flags. */
+ conf_p->debug_flag[SDP_DEBUG_TRACE] = FALSE;
+ conf_p->debug_flag[SDP_DEBUG_WARNINGS] = FALSE;
+ conf_p->debug_flag[SDP_DEBUG_ERRORS] = FALSE;
+
+ /* Set required lines flags. Note: Only need to set those that */
+ /* are questionable. Most lines aren't required by default. */
+ conf_p->version_reqd = TRUE;
+ conf_p->owner_reqd = TRUE;
+ conf_p->session_name_reqd = TRUE;
+ conf_p->timespec_reqd = TRUE;
+
+ /* No media types supported by default. */
+ for (i=0; i < SDP_MAX_MEDIA_TYPES; i++) {
+ conf_p->media_supported[i] = FALSE;
+ }
+
+ /* No network types supported by default. */
+ for (i=0; i < SDP_MAX_NETWORK_TYPES; i++) {
+ conf_p->nettype_supported[i] = FALSE;
+ }
+
+ /* No address types supported by default. */
+ for (i=0; i < SDP_MAX_ADDR_TYPES; i++) {
+ conf_p->addrtype_supported[i] = FALSE;
+ }
+
+ /* No transport types supported by default. */
+ for (i=0; i < SDP_MAX_TRANSPORT_TYPES; i++) {
+ conf_p->transport_supported[i] = FALSE;
+ }
+
+ /* No choose parameters allowed by default. */
+ for (i=0; i < SDP_MAX_CHOOSE_PARAMS; i++) {
+ conf_p->allow_choose[i] = FALSE;
+ }
+
+ /* Initialize statistics counts */
+ conf_p->num_parses = 0;
+ conf_p->num_builds = 0;
+ conf_p->num_not_sdp_desc = 0;
+ conf_p->num_invalid_token_order = 0;
+ conf_p->num_invalid_param = 0;
+ conf_p->num_no_resource = 0;
+
+ /* Parse error handler stuff */
+ conf_p->error_handler = NULL;
+ conf_p->error_handler_context = NULL;
+
+ CSFLogInfo(logTag, "SDP: Initialized config pointer: %p", conf_p);
+
+ return (conf_p);
+}
+
+void sdp_free_config(sdp_conf_options_t* conf_p) {
+ if (conf_p) {
+ SDP_FREE(conf_p);
+ }
+}
+
+/* Function: void sdp_appl_debug(sdp_conf_options_t *conf_p, sdp_debug_e debug_type,
+ * tinybool my_bool);
+ * Description: Define the default type of debug for the application.
+ * Valid debug types are ERRORS, WARNINGS, and TRACE. Each
+ * debug type can be turned on/off individually. The
+ * default debug level can be redefined at any time.
+ * Parameters: conf_p The config handle returned by sdp_init_config.
+ * debug_type Specifies the debug type being enabled/disabled.
+ * debug_flag Defines whether the debug should be enabled or not.
+ * Returns: Nothing.
+ */
+void sdp_appl_debug (sdp_conf_options_t *conf_p, sdp_debug_e debug_type,
+ tinybool debug_flag)
+{
+ if (debug_type < SDP_MAX_DEBUG_TYPES) {
+ conf_p->debug_flag[debug_type] = debug_flag;
+ }
+}
+
+
/* Functions: void sdp_require_version
 *            void sdp_require_owner
 *            void sdp_require_session_name
 *            void sdp_require_timespec
 * Description: These functions allow the application to not require several
 *              of the tokens that are specifically required by RFC 2327
 *              (the v=, o=, s= and t= lines respectively).
 * Parameters: conf_p The config handle returned by sdp_init_config.
 *             version_required TRUE or FALSE whether the token should
 *             be required.
 * Returns: Nothing.
 */
void sdp_require_version (sdp_conf_options_t *conf_p, tinybool version_required)
{
    conf_p->version_reqd = version_required;
}

void sdp_require_owner (sdp_conf_options_t *conf_p, tinybool owner_required)
{
    conf_p->owner_reqd = owner_required;
}

void sdp_require_session_name (sdp_conf_options_t *conf_p, tinybool sess_name_required)
{
    conf_p->session_name_reqd = sess_name_required;
}

void sdp_require_timespec (sdp_conf_options_t *conf_p, tinybool timespec_required)
{
    conf_p->timespec_reqd = timespec_required;
}
+
+
+/* Function: sdp_media_supported
+ * Description: These functions allow the application to specify which
+ * media types it supports. The application must set any/all
+ * as required. No media types are supported by default.
+ * Parameters: conf_p The config handle returned by sdp_init_config.
+ * nettype The network type for which support is being set.
+ * media_supported TRUE or FALSE whether the support is provided.
+ * Returns: Nothing.
+ */
+void sdp_media_supported (sdp_conf_options_t *conf_p, sdp_media_e media_type,
+ tinybool media_supported)
+{
+ conf_p->media_supported[media_type] = media_supported;
+}
+
+
+/* Function: sdp_nettype_supported
+ * Description: This function allows the application to specify which
+ * network types it supports. The application must set
+ * any/all as required. No network types are supported by
+ * default.
+ * Parameters: conf_p The config handle returned by sdp_init_config.
+ * nettype The network type for which support is being set.
+ * nettype_supported TRUE or FALSE whether the support is
+ * provided.
+ * Returns: Nothing.
+ */
+void sdp_nettype_supported (sdp_conf_options_t *conf_p, sdp_nettype_e nettype,
+ tinybool nettype_supported)
+{
+ conf_p->nettype_supported[nettype] = nettype_supported;
+}
+
+
+/* Function: sdp_addrtype_supported
+ * Description: This function allows the application to specify which
+ * address types it supports. The application must set
+ * any/all as required. No address types are supported by
+ * default.
+ * Parameters: conf_p The config handle returned by sdp_init_config.
+ * addrtype The address type for which support is being set.
+ * addrtype_supported TRUE or FALSE whether the support is
+ * provided.
+ * Returns: Nothing.
+ */
+void sdp_addrtype_supported (sdp_conf_options_t *conf_p, sdp_addrtype_e addrtype,
+ tinybool addrtype_supported)
+{
+ conf_p->addrtype_supported[addrtype] = addrtype_supported;
+}
+
+
+/* Function: sdp_transport_supported
+ * Description: This function allows the application to specify which
+ * transport types it supports. The application must set
+ * any/all as required. No transport types are supported
+ * by default.
+ * Parameters: conf_p The config handle returned by sdp_init_config.
+ * transport The transport type for which support is being set.
+ * transport_supported TRUE or FALSE whether the support is
+ * provided.
+ * Returns: Nothing.
+ */
+void sdp_transport_supported (sdp_conf_options_t *conf_p, sdp_transport_e transport,
+ tinybool transport_supported)
+{
+ conf_p->transport_supported[transport] = transport_supported;
+}
+
+
/* Function: sdp_allow_choose
 * Description: These functions allow the CHOOSE parameter `$' to be
 *              specified in place of certain parameters.
 * Parameters: conf_p The config handle returned by sdp_init_config.
 *             param The param that may or may not be CHOOSE.
 *             choose_allowed TRUE or FALSE whether the CHOOSE parameter
 *             should be allowed.
 * Returns: Nothing.
 */
void sdp_allow_choose (sdp_conf_options_t *conf_p, sdp_choose_param_e param, tinybool choose_allowed)
{
    /* Out-of-range params are silently ignored. */
    if (param < SDP_MAX_CHOOSE_PARAMS) {
        conf_p->allow_choose[param] = choose_allowed;
    }
}
+
/* Function: sdp_config_set_error_handler
 * Description: Store the parse error handler callback and its opaque
 *              context pointer in the configuration.
 * Parameters: conf_p The config handle returned by sdp_init_config.
 *             handler The parse-error callback (NULL to clear).
 *             context Opaque pointer stored alongside the handler.
 * Returns: Nothing.
 */
void sdp_config_set_error_handler(sdp_conf_options_t *conf_p,
                                  sdp_parse_error_handler handler,
                                  void *context)
{
    conf_p->error_handler = handler;
    conf_p->error_handler_context = context;
}
diff --git a/media/webrtc/signaling/src/sdp/sipcc/sdp_main.c b/media/webrtc/signaling/src/sdp/sipcc/sdp_main.c
new file mode 100644
index 000000000..da66c1c07
--- /dev/null
+++ b/media/webrtc/signaling/src/sdp/sipcc/sdp_main.c
@@ -0,0 +1,1342 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "sdp_os_defs.h"
+#include "sdp.h"
+#include "sdp_private.h"
+
+#include "CSFLog.h"
+
+static const char* logTag = "sdp_main";
+
/* Note: These *must* be in the same order as the enum types. */
/* Dispatch table mapping each SDP line prefix ("v=", "o=", ...) to its
 * parse and build handlers. */
const sdp_tokenarray_t sdp_token[SDP_MAX_TOKENS] =
{
    {"v=", sdp_parse_version,      sdp_build_version },
    {"o=", sdp_parse_owner,        sdp_build_owner },
    {"s=", sdp_parse_sessname,     sdp_build_sessname },
    {"i=", sdp_parse_sessinfo,     sdp_build_sessinfo },
    {"u=", sdp_parse_uri,          sdp_build_uri },
    {"e=", sdp_parse_email,        sdp_build_email },
    {"p=", sdp_parse_phonenum,     sdp_build_phonenum },
    {"c=", sdp_parse_connection,   sdp_build_connection },
    {"b=", sdp_parse_bandwidth,    sdp_build_bandwidth },
    {"t=", sdp_parse_timespec,     sdp_build_timespec },
    {"r=", sdp_parse_repeat_time,  sdp_build_repeat_time },
    {"z=", sdp_parse_timezone_adj, sdp_build_timezone_adj },
    {"k=", sdp_parse_encryption,   sdp_build_encryption },
    {"a=", sdp_parse_attribute,    sdp_build_attribute },
    {"m=", sdp_parse_media,        sdp_build_media }
};
+
+
/* Note: These *must* be in the same order as the enum types. */
/* Attribute dispatch table: each entry holds the attribute name, the
 * sizeof() its name literal (length including the NUL terminator), and
 * the parse/build handler pair.  Indexed by the attribute-type enum. */
const sdp_attrarray_t sdp_attr[SDP_MAX_ATTR_TYPES] =
{
    {"bearer", sizeof("bearer"),
     sdp_parse_attr_simple_string, sdp_build_attr_simple_string },
    {"called", sizeof("called"),
     sdp_parse_attr_simple_string, sdp_build_attr_simple_string },
    {"connection_type", sizeof("connection_type"),
     sdp_parse_attr_simple_string, sdp_build_attr_simple_string },
    {"dialed", sizeof("dialed"),
     sdp_parse_attr_simple_string, sdp_build_attr_simple_string },
    {"dialing", sizeof("dialing"),
     sdp_parse_attr_simple_string, sdp_build_attr_simple_string },
    {"direction", sizeof("direction"),
     sdp_parse_attr_comediadir, sdp_build_attr_comediadir },
    {"eecid", sizeof("eecid"),
     sdp_parse_attr_simple_u32, sdp_build_attr_simple_u32 },
    {"fmtp", sizeof("fmtp"),
     sdp_parse_attr_fmtp, sdp_build_attr_fmtp },
    {"sctpmap", sizeof("sctpmap"),
     sdp_parse_attr_sctpmap, sdp_build_attr_sctpmap },
    {"framing", sizeof("framing"),
     sdp_parse_attr_simple_string, sdp_build_attr_simple_string },
    {"inactive", sizeof("inactive"),
     sdp_parse_attr_direction, sdp_build_attr_direction },
    {"ptime", sizeof("ptime"),
     sdp_parse_attr_simple_u32, sdp_build_attr_simple_u32 },
    {"qos", sizeof("qos"),
     sdp_parse_attr_qos, sdp_build_attr_qos },
    {"curr", sizeof("curr"),
     sdp_parse_attr_curr, sdp_build_attr_curr },
    {"des", sizeof("des"),
     sdp_parse_attr_des, sdp_build_attr_des},
    {"conf", sizeof("conf"),
     sdp_parse_attr_conf, sdp_build_attr_conf},
    {"recvonly", sizeof("recvonly"),
     sdp_parse_attr_direction, sdp_build_attr_direction },
    {"rtpmap", sizeof("rtpmap"),
     sdp_parse_attr_transport_map, sdp_build_attr_transport_map },
    {"secure", sizeof("secure"),
     sdp_parse_attr_qos, sdp_build_attr_qos },
    {"sendonly", sizeof("sendonly"),
     sdp_parse_attr_direction, sdp_build_attr_direction },
    {"sendrecv", sizeof("sendrecv"),
     sdp_parse_attr_direction, sdp_build_attr_direction },
    {"subnet", sizeof("subnet"),
     sdp_parse_attr_subnet, sdp_build_attr_subnet },
    {"T38FaxVersion", sizeof("T38FaxVersion"),
     sdp_parse_attr_simple_u32, sdp_build_attr_simple_u32 },
    {"T38MaxBitRate", sizeof("T38MaxBitRate"),
     sdp_parse_attr_simple_u32, sdp_build_attr_simple_u32 },
    {"T38FaxFillBitRemoval", sizeof("T38FaxFillBitRemoval"),
     sdp_parse_attr_simple_bool, sdp_build_attr_simple_bool },
    {"T38FaxTranscodingMMR", sizeof("T38FaxTranscodingMMR"),
     sdp_parse_attr_simple_bool, sdp_build_attr_simple_bool },
    {"T38FaxTranscodingJBIG", sizeof("T38FaxTranscodingJBIG"),
     sdp_parse_attr_simple_bool, sdp_build_attr_simple_bool },
    {"T38FaxRateManagement", sizeof("T38FaxRateManagement"),
     sdp_parse_attr_t38_ratemgmt, sdp_build_attr_t38_ratemgmt },
    {"T38FaxMaxBuffer", sizeof("T38FaxMaxBuffer"),
     sdp_parse_attr_simple_u32, sdp_build_attr_simple_u32 },
    {"T38FaxMaxDatagram", sizeof("T38FaxMaxDatagram"),
     sdp_parse_attr_simple_u32, sdp_build_attr_simple_u32 },
    {"T38FaxUdpEC", sizeof("T38FaxUdpEC"),
     sdp_parse_attr_t38_udpec, sdp_build_attr_t38_udpec },
    {"X-cap", sizeof("X-cap"),
     sdp_parse_attr_cap, sdp_build_attr_cap },
    {"X-cpar", sizeof("X-cpar"),
     sdp_parse_attr_cpar, sdp_build_attr_cpar },
    {"X-pc-codec", sizeof("X-pc-codec"),
     sdp_parse_attr_pc_codec, sdp_build_attr_pc_codec },
    {"X-pc-qos", sizeof("X-pc-qos"),
     sdp_parse_attr_qos, sdp_build_attr_qos },
    {"X-qos", sizeof("X-qos"),
     sdp_parse_attr_qos, sdp_build_attr_qos },
    {"X-sqn", sizeof("X-sqn"),
     sdp_parse_attr_simple_u32, sdp_build_attr_simple_u32 },
    {"TMRGwXid", sizeof("TMRGwXid"),
     sdp_parse_attr_simple_bool, sdp_build_attr_simple_bool },
    {"TC1PayloadBytes", sizeof("TC1PayloadBytes"),
     sdp_parse_attr_simple_u32, sdp_build_attr_simple_u32 },
    {"TC1WindowSize", sizeof("TC1WindowSize"),
     sdp_parse_attr_simple_u32, sdp_build_attr_simple_u32 },
    {"TC2PayloadBytes", sizeof("TC2PayloadBytes"),
     sdp_parse_attr_simple_u32, sdp_build_attr_simple_u32 },
    {"TC2WindowSize", sizeof("TC2WindowSize"),
     sdp_parse_attr_simple_u32, sdp_build_attr_simple_u32 },
    {"rtcp", sizeof("rtcp"),
     sdp_parse_attr_rtcp, sdp_build_attr_rtcp },
    {"rtr", sizeof("rtr"),
     sdp_parse_attr_rtr, sdp_build_attr_rtr},
    {"silenceSupp", sizeof("silenceSupp"),
     sdp_parse_attr_silencesupp, sdp_build_attr_silencesupp },
    {"X-crypto", sizeof("X-crypto"),
     sdp_parse_attr_srtpcontext, sdp_build_attr_srtpcontext },
    {"mptime", sizeof("mptime"),
     sdp_parse_attr_mptime, sdp_build_attr_mptime },
    {"X-sidin", sizeof("X-sidin"),
     sdp_parse_attr_x_sidin, sdp_build_attr_x_sidin },
    {"X-sidout", sizeof("X-sidout"),
     sdp_parse_attr_x_sidout, sdp_build_attr_x_sidout },
    {"X-confid", sizeof("X-confid"),
     sdp_parse_attr_x_confid, sdp_build_attr_x_confid },
    {"group", sizeof("group"),
     sdp_parse_attr_group, sdp_build_attr_group },
    {"mid", sizeof("mid"),
     sdp_parse_attr_simple_string, sdp_build_attr_simple_string },
    {"source-filter", sizeof("source-filter"),
     sdp_parse_attr_source_filter, sdp_build_source_filter},
    {"rtcp-unicast", sizeof("rtcp-unicast"),
     sdp_parse_attr_rtcp_unicast, sdp_build_attr_rtcp_unicast},
    {"maxprate", sizeof("maxprate"),
     sdp_parse_attr_maxprate, sdp_build_attr_simple_string},
    {"sqn", sizeof("sqn"),
     sdp_parse_attr_simple_u32, sdp_build_attr_simple_u32 },
    {"cdsc", sizeof("cdsc"),
     sdp_parse_attr_cap, sdp_build_attr_cap },
    {"cpar", sizeof("cpar"),
     sdp_parse_attr_cpar, sdp_build_attr_cpar },
    {"sprtmap", sizeof("sprtmap"),
     sdp_parse_attr_transport_map, sdp_build_attr_transport_map },
    {"crypto", sizeof("crypto"),
     sdp_parse_attr_sdescriptions, sdp_build_attr_sdescriptions },
    {"label", sizeof("label"),
     sdp_parse_attr_simple_string, sdp_build_attr_simple_string },
    {"framerate", sizeof("framerate"),
     sdp_parse_attr_simple_u32, sdp_build_attr_simple_u32 },
    {"candidate", sizeof("candidate"),
     sdp_parse_attr_ice_attr, sdp_build_attr_ice_attr },
    {"ice-ufrag", sizeof("ice-ufrag"),
     sdp_parse_attr_ice_attr, sdp_build_attr_ice_attr },
    {"ice-pwd", sizeof("ice-pwd"),
     sdp_parse_attr_ice_attr, sdp_build_attr_ice_attr},
    {"ice-lite", sizeof("ice-lite"),
     sdp_parse_attr_simple_flag, sdp_build_attr_simple_flag},
    {"rtcp-mux", sizeof("rtcp-mux"),
     sdp_parse_attr_simple_flag, sdp_build_attr_simple_flag},
    {"fingerprint", sizeof("fingerprint"),
     sdp_parse_attr_complete_line, sdp_build_attr_simple_string},
    {"maxptime", sizeof("maxptime"),
     sdp_parse_attr_simple_u32, sdp_build_attr_simple_u32},
    {"rtcp-fb", sizeof("rtcp-fb"),
     sdp_parse_attr_rtcp_fb, sdp_build_attr_rtcp_fb},
    {"setup", sizeof("setup"),
     sdp_parse_attr_setup, sdp_build_attr_setup},
    {"connection", sizeof("connection"),
     sdp_parse_attr_connection, sdp_build_attr_connection},
    {"extmap", sizeof("extmap"),
     sdp_parse_attr_extmap, sdp_build_attr_extmap},
    {"identity", sizeof("identity"),
     sdp_parse_attr_long_line, sdp_build_attr_long_line},
    {"msid", sizeof("msid"),
     sdp_parse_attr_msid, sdp_build_attr_msid},
    {"msid-semantic", sizeof("msid-semantic"),
     sdp_parse_attr_msid_semantic, sdp_build_attr_msid_semantic},
    {"bundle-only", sizeof("bundle-only"),
     sdp_parse_attr_simple_flag, sdp_build_attr_simple_flag},
    {"end-of-candidates", sizeof("end-of-candidates"),
     sdp_parse_attr_simple_flag, sdp_build_attr_simple_flag},
    {"ice-options", sizeof("ice-options"),
     sdp_parse_attr_complete_line, sdp_build_attr_simple_string},
    {"ssrc", sizeof("ssrc"),
     sdp_parse_attr_ssrc, sdp_build_attr_ssrc},
    {"imageattr", sizeof("imageattr"),
     sdp_parse_attr_complete_line, sdp_build_attr_simple_string},
    {"simulcast", sizeof("simulcast"),
     sdp_parse_attr_complete_line, sdp_build_attr_simple_string},
    {"rid", sizeof("rid"),
     sdp_parse_attr_complete_line, sdp_build_attr_simple_string},
    {"dtls-message", sizeof("dtls-message"),
     sdp_parse_attr_long_line, sdp_build_attr_long_line},
};
+
/* Note: These *must* be in the same order as the enum types. */
/* Name tables below pair each keyword with sizeof() of its literal
 * (length including the NUL terminator); each is indexed by the
 * corresponding enum. */
const sdp_namearray_t sdp_media[SDP_MAX_MEDIA_TYPES] =
{
    {"audio", sizeof("audio")},
    {"video", sizeof("video")},
    {"application", sizeof("application")},
    {"data", sizeof("data")},
    {"control", sizeof("control")},
    {"nas/radius", sizeof("nas/radius")},
    {"nas/tacacs", sizeof("nas/tacacs")},
    {"nas/diameter", sizeof("nas/diameter")},
    {"nas/l2tp", sizeof("nas/l2tp")},
    {"nas/login", sizeof("nas/login")},
    {"nas/none", sizeof("nas/none")},
    {"image", sizeof("image")},
    {"text", sizeof("text")}
};


/* Note: These *must* be in the same order as the enum types. */
const sdp_namearray_t sdp_nettype[SDP_MAX_NETWORK_TYPES] =
{
    {"IN", sizeof("IN")},
    {"ATM", sizeof("ATM")},
    {"FR", sizeof("FR")},
    {"LOCAL", sizeof("LOCAL")}
};


/* Note: These *must* be in the same order as the enum types. */
const sdp_namearray_t sdp_addrtype[SDP_MAX_ADDR_TYPES] =
{
    {"IP4", sizeof("IP4")},
    {"IP6", sizeof("IP6")},
    {"NSAP", sizeof("NSAP")},
    {"EPN", sizeof("EPN")},
    {"E164", sizeof("E164")},
    {"GWID", sizeof("GWID")}
};


/* Note: These *must* be in the same order as the enum type. */
const sdp_namearray_t sdp_transport[SDP_MAX_TRANSPORT_TYPES] =
{
    {"RTP/AVP", sizeof("RTP/AVP")},
    {"udp", sizeof("udp")},
    {"udptl", sizeof("udptl")},
    {"ces10", sizeof("ces10")},
    {"LOCAL", sizeof("LOCAL")},
    {"AAL2/ITU", sizeof("AAL2/ITU")},
    {"AAL2/ATMF", sizeof("AAL2/ATMF")},
    {"AAL2/custom", sizeof("AAL2/custom")},
    {"AAL1/AVP", sizeof("AAL1/AVP")},
    {"udpsprt", sizeof("udpsprt")},
    {"RTP/SAVP", sizeof("RTP/SAVP")},
    {"tcp", sizeof("tcp")},
    {"RTP/SAVPF", sizeof("RTP/SAVPF")},
    {"DTLS/SCTP", sizeof("DTLS/SCTP")},
    {"RTP/AVPF", sizeof("RTP/AVPF")},
    {"UDP/TLS/RTP/SAVP", sizeof("UDP/TLS/RTP/SAVP")},
    {"UDP/TLS/RTP/SAVPF", sizeof("UDP/TLS/RTP/SAVPF")},
    {"TCP/TLS/RTP/SAVP", sizeof("TCP/TLS/RTP/SAVP")},
    {"TCP/TLS/RTP/SAVPF", sizeof("TCP/TLS/RTP/SAVPF")},
};

/* Note: These *must* be in the same order as the enum type. */
const sdp_namearray_t sdp_encrypt[SDP_MAX_ENCRYPT_TYPES] =
{
    {"clear", sizeof("clear")},
    {"base64", sizeof("base64")},
    {"uri", sizeof("uri")},
    {"prompt", sizeof("prompt")}
};

/* Note: These *must* be in the same order as the enum type. */
const sdp_namearray_t sdp_payload[SDP_MAX_STRING_PAYLOAD_TYPES] =
{
    {"t38", sizeof("t38")},
    {"X-tmr", sizeof("X-tmr")},
    {"T120", sizeof("T120")}
};

/* Note: These *must* be in the same order as the enum type. */
const sdp_namearray_t sdp_t38_rate[SDP_T38_MAX_RATES] =
{
    {"localTCF", sizeof("localTCF")},
    {"transferredTCF", sizeof("transferredTCF")},
    {"unknown", sizeof("unknown")}
};

/* Note: These *must* be in the same order as the enum type. */
const sdp_namearray_t sdp_t38_udpec[SDP_T38_MAX_UDPEC] =
{
    {"t38UDPRedundancy", sizeof("t38UDPRedundancy")},
    {"t38UDPFEC", sizeof("t38UDPFEC")},
    {"unknown", sizeof("unknown")}
};

/* Note: These *must* be in the same order as the enum type. */
const sdp_namearray_t sdp_qos_strength[SDP_MAX_QOS_STRENGTH] =
{
    {"optional", sizeof("optional")},
    {"mandatory", sizeof("mandatory")},
    {"success", sizeof("success")},
    {"failure", sizeof("failure")},
    {"none", sizeof("none")}
};

/* Note: These *must* be in the same order as the enum type. */
const sdp_namearray_t sdp_qos_status_type[SDP_MAX_QOS_STATUS_TYPES] =
{
    {"local", sizeof("local")},
    {"remote", sizeof("remote")},
    {"e2e", sizeof("e2e")}
};

/* Note: These *must* be in the same order as the enum type. */
const sdp_namearray_t sdp_curr_type[SDP_MAX_CURR_TYPES] =
{
    {"qos", sizeof("qos")},
    {"unknown", sizeof("unknown")}
};

/* Note: These *must* be in the same order as the enum type. */
const sdp_namearray_t sdp_des_type[SDP_MAX_DES_TYPES] =
{
    {"qos", sizeof("qos")},
    {"unknown", sizeof("unknown")}
};
+
+/* Note: These *must* be in the same order as the enum type. */
+const sdp_namearray_t sdp_conf_type[SDP_MAX_CONF_TYPES] =
+{
+ {"qos", sizeof("qos")},
+ {"unknown", sizeof("unknown")}
+};
+/* Note: These *must* be in the same order as the enum type. */
+const sdp_namearray_t sdp_qos_direction[SDP_MAX_QOS_DIR] =
+{
+ {"send", sizeof("send")},
+ {"recv", sizeof("recv")},
+ {"sendrecv", sizeof("sendrecv")},
+ {"none", sizeof("none")}
+};
+
+/* Note: These *must* be in the same order as the enum type. */
+const sdp_namearray_t sdp_silencesupp_pref[SDP_MAX_SILENCESUPP_PREF] = {
+ {"standard", sizeof("standard")},
+ {"custom", sizeof("custom")},
+ {"-", sizeof("-")}
+};
+
+/* Note: These *must* be in the same order as the enum type. */
+const sdp_namearray_t sdp_silencesupp_siduse[SDP_MAX_SILENCESUPP_SIDUSE] = {
+ {"No SID", sizeof("No SID")},
+ {"Fixed Noise", sizeof("Fixed Noise")},
+ {"Sampled Noise", sizeof("Sampled Noise")},
+ {"-", sizeof("-")}
+};
+
+/* Note: These *must* be in the same order as the enum type. */
+const sdp_namearray_t sdp_mediadir_role[SDP_MAX_MEDIADIR_ROLES] =
+{
+ {"passive", sizeof("passive")},
+ {"active", sizeof("active")},
+ {"both", sizeof("both")},
+ {"reuse", sizeof("reuse")},
+ {"unknown", sizeof("unknown")}
+};
+
+/* Note: These *must* be in the same order as the enum type. */
+/* a=fmtp parameter names, indexed by the fmtp-param enum; the index of
+ * each entry is given in the trailing comment. The second field is the
+ * literal's sizeof (strlen + 1).
+ * Fix: entry 31 previously paired the name "max-rcmd-nalu-size" with
+ * sizeof("max-rcmd_nali-size") -- a typo'd (but same-length) literal.
+ * Both literals now match; RFC 6184 spells it "max-rcmd-nalu-size". */
+const sdp_namearray_t sdp_fmtp_codec_param[SDP_MAX_FMTP_PARAM] =
+{
+ {"annexa", sizeof("annexa")}, /* 0 */
+ {"annexb", sizeof("annexb")}, /* 1 */
+ {"bitrate", sizeof("bitrate")}, /* 2 */
+ {"QCIF", sizeof("QCIF")}, /* 3 */
+ {"CIF", sizeof("CIF")}, /* 4 */
+ {"MAXBR", sizeof("MAXBR")}, /* 5 */
+ {"SQCIF", sizeof("SQCIF")}, /* 6 */
+ {"CIF4", sizeof("CIF4")}, /* 7 */
+ {"CIF16", sizeof("CIF16")}, /* 8 */
+ {"CUSTOM", sizeof("CUSTOM")}, /* 9 */
+ {"PAR", sizeof("PAR")}, /* 10 */
+ {"CPCF", sizeof("CPCF")}, /* 11 */
+ {"BPP", sizeof("BPP")}, /* 12 */
+ {"HRD", sizeof("HRD")}, /* 13 */
+ {"PROFILE", sizeof("PROFILE")}, /* 14 */
+ {"LEVEL", sizeof("LEVEL")}, /* 15 */
+ {"INTERLACE", sizeof("INTERLACE")}, /* 16 */
+
+ /* H.264 related */
+ {"profile-level-id", sizeof("profile-level-id")}, /* 17 */
+ {"sprop-parameter-sets", sizeof("sprop-parameter-sets")}, /* 18 */
+ {"packetization-mode", sizeof("packetization-mode")}, /* 19 */
+ {"sprop-interleaving-depth", sizeof("sprop-interleaving-depth")}, /* 20 */
+ {"sprop-deint-buf-req", sizeof("sprop-deint-buf-req")}, /* 21 */
+ {"sprop-max-don-diff", sizeof("sprop-max-don-diff")}, /* 22 */
+ {"sprop-init-buf-time", sizeof("sprop-init-buf-time")}, /* 23 */
+
+ {"max-mbps", sizeof("max-mbps")}, /* 24 */
+ {"max-fs", sizeof("max-fs")}, /* 25 */
+ {"max-cpb", sizeof("max-cpb")}, /* 26 */
+ {"max-dpb", sizeof("max-dpb")}, /* 27 */
+ {"max-br", sizeof("max-br")}, /* 28 */
+ {"redundant-pic-cap", sizeof("redundant-pic-cap")}, /* 29 */
+ {"deint-buf-cap", sizeof("deint-buf-cap")}, /* 30 */
+ {"max-rcmd-nalu-size", sizeof("max-rcmd-nalu-size")}, /* 31 */
+ {"parameter-add", sizeof("parameter-add")}, /* 32 */
+
+ /* Annexes - require special handling */
+ {"D", sizeof("D")}, /* 33 */
+ {"F", sizeof("F")}, /* 34 */
+ {"I", sizeof("I")}, /* 35 */
+ {"J", sizeof("J")}, /* 36 */
+ {"T", sizeof("T")}, /* 37 */
+ {"K", sizeof("K")}, /* 38 */
+ {"N", sizeof("N")}, /* 39 */
+ {"P", sizeof("P")}, /* 40 */
+
+ {"mode", sizeof("mode")}, /* 41 */
+ {"level-asymmetry-allowed", sizeof("level-asymmetry-allowed")}, /* 42 */
+ {"maxaveragebitrate", sizeof("maxaveragebitrate")}, /* 43 */
+ {"usedtx", sizeof("usedtx")}, /* 44 */
+ {"stereo", sizeof("stereo")}, /* 45 */
+ {"useinbandfec", sizeof("useinbandfec")}, /* 46 */
+ {"maxcodedaudiobandwidth", sizeof("maxcodedaudiobandwidth")}, /* 47 */
+ {"cbr", sizeof("cbr")}, /* 48 */
+ {"max-fr", sizeof("max-fr")}, /* 49 */
+ {"maxplaybackrate", sizeof("maxplaybackrate")} /* 50 */
+};
+
+/* Note: These *must* be in the same order as the enum type. */
+/* Boolean fmtp parameter values ("yes"/"no"). */
+const sdp_namearray_t sdp_fmtp_codec_param_val[SDP_MAX_FMTP_PARAM_VAL] =
+{
+ {"yes", sizeof("yes")},
+ {"no", sizeof("no")}
+};
+
+/* b= bandwidth modifier names, indexed by sdp_bw_modifier_e. */
+const sdp_namearray_t sdp_bw_modifier_val[SDP_MAX_BW_MODIFIER_VAL] =
+{
+ {"AS", sizeof("AS")},
+ {"CT", sizeof("CT")},
+ {"TIAS", sizeof("TIAS")}
+};
+
+/* a=group: semantics names, indexed by sdp_group_attr_e. */
+const sdp_namearray_t sdp_group_attr_val[SDP_MAX_GROUP_ATTR_VAL] =
+{
+ {"FID", sizeof("FID")},
+ {"LS", sizeof("LS")},
+ {"ANAT", sizeof("ANAT")},
+ {"BUNDLE", sizeof("BUNDLE")}
+};
+
+/* a=crypto suite names; index 0 is the "unknown" placeholder. */
+const sdp_namearray_t sdp_srtp_context_crypto_suite[SDP_SRTP_MAX_NUM_CRYPTO_SUITES] =
+{
+ {"UNKNOWN_CRYPTO_SUITE", sizeof("UNKNOWN_CRYPTO_SUITE")},
+ {"AES_CM_128_HMAC_SHA1_32", sizeof("AES_CM_128_HMAC_SHA1_32")},
+ {"AES_CM_128_HMAC_SHA1_80", sizeof("AES_CM_128_HMAC_SHA1_80")},
+ {"F8_128_HMAC_SHA1_80", sizeof("F8_128_HMAC_SHA1_80")}
+};
+
+/* Maintain the same order as defined in typedef sdp_src_filter_mode_e */
+const sdp_namearray_t sdp_src_filter_mode_val[SDP_MAX_FILTER_MODE] =
+{
+ {"incl", sizeof("incl")},
+ {"excl", sizeof("excl")}
+};
+
+/* Maintain the same order as defined in typdef sdp_rtcp_unicast_mode_e */
+const sdp_namearray_t sdp_rtcp_unicast_mode_val[SDP_RTCP_MAX_UNICAST_MODE] =
+{
+ {"reflection", sizeof("reflection")},
+ {"rsi", sizeof("rsi")}
+};
+
+/* Helper: expands to a {name, sizeof(name)} initializer for the tables
+ * below (same shape as the hand-written entries above). */
+#define SDP_NAME(x) {x, sizeof(x)}
+/* Maintain the same order as defined in typdef sdp_rtcp_fb_type_e */
+const sdp_namearray_t sdp_rtcp_fb_type_val[SDP_MAX_RTCP_FB] =
+{
+ SDP_NAME("ack"),
+ SDP_NAME("ccm"),
+ SDP_NAME("nack"),
+ SDP_NAME("trr-int"),
+ SDP_NAME("goog-remb")
+};
+
+/* Maintain the same order as defined in typdef sdp_rtcp_fb_nack_type_e */
+/* First entry is the empty string: plain "a=rtcp-fb:<pt> nack" carries
+ * no sub-parameter. */
+const sdp_namearray_t sdp_rtcp_fb_nack_type_val[SDP_MAX_RTCP_FB_NACK] =
+{
+ SDP_NAME(""),
+ SDP_NAME("sli"),
+ SDP_NAME("pli"),
+ SDP_NAME("rpsi"),
+ SDP_NAME("app"),
+ SDP_NAME("rai"),
+ SDP_NAME("tllei"),
+ SDP_NAME("pslei"),
+ SDP_NAME("ecn")
+};
+
+/* Maintain the same order as defined in typdef sdp_rtcp_fb_ack_type_e */
+const sdp_namearray_t sdp_rtcp_fb_ack_type_val[SDP_MAX_RTCP_FB_ACK] =
+{
+ SDP_NAME("rpsi"),
+ SDP_NAME("app")
+};
+
+/* Maintain the same order as defined in typdef sdp_rtcp_fb_ccm_type_e */
+const sdp_namearray_t sdp_rtcp_fb_ccm_type_val[SDP_MAX_RTCP_FB_CCM] =
+{
+ SDP_NAME("fir"),
+ SDP_NAME("tmmbr"),
+ SDP_NAME("tstr"),
+ SDP_NAME("vbcm")
+};
+
+/* Maintain the same order as defined in typedef sdp_setup_type_e */
+const sdp_namearray_t sdp_setup_type_val[SDP_MAX_SETUP] =
+{
+ SDP_NAME("active"),
+ SDP_NAME("passive"),
+ SDP_NAME("actpass"),
+ SDP_NAME("holdconn")
+};
+
+/* Maintain the same order as defined in typedef sdp_connection_type_e */
+const sdp_namearray_t sdp_connection_type_val[SDP_MAX_CONNECTION] =
+{
+ SDP_NAME("new"),
+ SDP_NAME("existing")
+};
+
+/* Maintain same order as defined in typedef sdp_srtp_crypto_suite_t */
+/* Per-suite key/salt byte lengths used when parsing a=crypto inline keys. */
+const sdp_srtp_crypto_suite_list sdp_srtp_crypto_suite_array[SDP_SRTP_MAX_NUM_CRYPTO_SUITES] =
+{
+ {SDP_SRTP_UNKNOWN_CRYPTO_SUITE, UNKNOWN_CRYPTO_SUITE, 0, 0},
+ {SDP_SRTP_AES_CM_128_HMAC_SHA1_32, AES_CM_128_HMAC_SHA1_32,
+ SDP_SRTP_AES_CM_128_HMAC_SHA1_32_KEY_BYTES,
+ SDP_SRTP_AES_CM_128_HMAC_SHA1_32_SALT_BYTES},
+ {SDP_SRTP_AES_CM_128_HMAC_SHA1_80, AES_CM_128_HMAC_SHA1_80,
+ SDP_SRTP_AES_CM_128_HMAC_SHA1_80_KEY_BYTES,
+ SDP_SRTP_AES_CM_128_HMAC_SHA1_80_SALT_BYTES},
+ {SDP_SRTP_F8_128_HMAC_SHA1_80, F8_128_HMAC_SHA1_80,
+ SDP_SRTP_F8_128_HMAC_SHA1_80_KEY_BYTES,
+ SDP_SRTP_F8_128_HMAC_SHA1_80_SALT_BYTES}
+};
+
+/* Printable names for sdp_result_e, indexed by result code. */
+const char* sdp_result_name[SDP_MAX_RC] =
+ {"SDP_SUCCESS",
+ "SDP_FAILURE",
+ "SDP_INVALID_SDP_PTR",
+ "SDP_NOT_SDP_DESCRIPTION",
+ "SDP_INVALID_TOKEN_ORDERING",
+ "SDP_INVALID_PARAMETER",
+ "SDP_INVALID_MEDIA_LEVEL",
+ "SDP_INVALID_CAPABILITY",
+ "SDP_NO_RESOURCE",
+ "SDP_UNRECOGNIZED_TOKEN",
+ "SDP_NULL_BUF_PTR",
+ "SDP_POTENTIAL_SDP_OVERFLOW",
+ "SDP_EMPTY_TOKEN"};
+
+/* Return the printable name for an SDP result code. */
+const char *sdp_get_result_name ( sdp_result_e rc )
+{
+    return (rc < SDP_MAX_RC) ? sdp_result_name[rc]
+                             : "Invalid SDP result code";
+}
+
+/* Return the printable name for an attribute type. */
+const char *sdp_get_attr_name ( sdp_attr_e attr_type )
+{
+    return (attr_type < SDP_MAX_ATTR_TYPES) ? sdp_attr[attr_type].name
+                                            : "Invalid attribute type";
+}
+
+/* Return the printable name for an m= media type. */
+const char *sdp_get_media_name ( sdp_media_e media_type )
+{
+    if (media_type == SDP_MEDIA_UNSUPPORTED) {
+        return SDP_UNSUPPORTED;
+    }
+    return (media_type < SDP_MAX_MEDIA_TYPES) ? sdp_media[media_type].name
+                                              : "Invalid media type";
+}
+
+/* Return the printable name for a network type. */
+const char *sdp_get_network_name ( sdp_nettype_e network_type )
+{
+    if (network_type == SDP_NT_UNSUPPORTED) {
+        return SDP_UNSUPPORTED;
+    }
+    return (network_type < SDP_MAX_NETWORK_TYPES)
+               ? sdp_nettype[network_type].name
+               : "Invalid network type";
+}
+
+/* Return the printable name for an address type. */
+const char *sdp_get_address_name ( sdp_addrtype_e addr_type )
+{
+    if (addr_type == SDP_AT_UNSUPPORTED) {
+        return SDP_UNSUPPORTED;
+    }
+    if (addr_type < SDP_MAX_ADDR_TYPES) {
+        return sdp_addrtype[addr_type].name;
+    }
+    /* SDP_AT_FQDN lies outside the name table and is printed as "*". */
+    return (addr_type == SDP_AT_FQDN) ? "*" : "Invalid address type";
+}
+
+/* Return the printable name for a transport protocol. */
+const char *sdp_get_transport_name ( sdp_transport_e transport_type )
+{
+    if (transport_type == SDP_TRANSPORT_UNSUPPORTED) {
+        return SDP_UNSUPPORTED;
+    }
+    return (transport_type < SDP_MAX_TRANSPORT_TYPES)
+               ? sdp_transport[transport_type].name
+               : "Invalid transport type";
+}
+
+/* Return the printable name for a k= encryption method. */
+const char *sdp_get_encrypt_name ( sdp_encrypt_type_e encrypt_type )
+{
+    if (encrypt_type == SDP_ENCRYPT_UNSUPPORTED) {
+        return SDP_UNSUPPORTED;
+    }
+    return (encrypt_type < SDP_MAX_ENCRYPT_TYPES)
+               ? sdp_encrypt[encrypt_type].name
+               : "Invalid encryption type";
+}
+
+/* Return the printable name for a string payload type. */
+const char *sdp_get_payload_name ( sdp_payload_e payload )
+{
+    if (payload == SDP_PAYLOAD_UNSUPPORTED) {
+        return SDP_UNSUPPORTED;
+    }
+    return (payload < SDP_MAX_STRING_PAYLOAD_TYPES)
+               ? sdp_payload[payload].name
+               : "Invalid payload type";
+}
+
+/* Return the printable name for a T.38 rate management value. */
+const char *sdp_get_t38_ratemgmt_name ( sdp_t38_ratemgmt_e rate )
+{
+    return (rate < SDP_T38_MAX_RATES) ? sdp_t38_rate[rate].name
+                                      : "Invalid rate";
+}
+
+const char *sdp_get_t38_udpec_name ( sdp_t38_udpec_e udpec )
+{
+ if (udpec >= SDP_T38_MAX_UDPEC) {
+ return ("Invalid udpec");
+ } else {
+ return (sdp_t38_udpec[udpec].name);
+ }
+}
+
+/* Return the printable name for a QoS strength value. */
+const char *sdp_get_qos_strength_name ( sdp_qos_strength_e strength )
+{
+    return (strength < SDP_MAX_QOS_STRENGTH) ? sdp_qos_strength[strength].name
+                                             : "Invalid qos strength";
+}
+
+/* Return the printable name for a QoS direction. */
+const char *sdp_get_qos_direction_name ( sdp_qos_dir_e direction )
+{
+    return (direction < SDP_MAX_QOS_DIR) ? sdp_qos_direction[direction].name
+                                         : "Invalid qos direction";
+}
+
+/* Return the printable name for a QoS status type. */
+const char *sdp_get_qos_status_type_name ( sdp_qos_status_types_e status_type )
+{
+    return (status_type < SDP_MAX_QOS_STATUS_TYPES)
+               ? sdp_qos_status_type[status_type].name
+               : "Invalid qos status type";
+}
+
+/* Return the printable name for an a=curr type. */
+const char *sdp_get_curr_type_name (sdp_curr_type_e curr_type )
+{
+    return (curr_type < SDP_MAX_CURR_TYPES) ? sdp_curr_type[curr_type].name
+                                            : "Invalid curr type";
+}
+
+/* Return the printable name for an a=des type. */
+const char *sdp_get_des_type_name (sdp_des_type_e des_type )
+{
+    return (des_type < SDP_MAX_DES_TYPES) ? sdp_des_type[des_type].name
+                                          : "Invalid des type";
+}
+
+/* Return the printable name for an a=conf type. */
+const char *sdp_get_conf_type_name (sdp_conf_type_e conf_type )
+{
+    return (conf_type < SDP_MAX_CONF_TYPES) ? sdp_conf_type[conf_type].name
+                                            : "Invalid conf type";
+}
+
+/* Return the printable name for a silence suppression preference. */
+const char *sdp_get_silencesupp_pref_name (sdp_silencesupp_pref_e pref)
+{
+    return (pref < SDP_MAX_SILENCESUPP_PREF)
+               ? sdp_silencesupp_pref[pref].name
+               : "Invalid silencesupp pref";
+}
+
+/* Return the printable name for a silence suppression SID usage value. */
+const char *sdp_get_silencesupp_siduse_name (sdp_silencesupp_siduse_e siduse)
+{
+    return (siduse < SDP_MAX_SILENCESUPP_SIDUSE)
+               ? sdp_silencesupp_siduse[siduse].name
+               : "Invalid silencesupp siduse";
+}
+
+/* Return the printable name for a media direction role.
+ * NOTE(review): the bound here is SDP_MEDIADIR_ROLE_UNKNOWN rather than
+ * SDP_MAX_MEDIADIR_ROLES (used to size the table), so the table's
+ * "unknown" entry can never be returned by this function. Presumably
+ * intentional -- confirm against the sdp_mediadir_role_e definition. */
+const char *sdp_get_mediadir_role_name (sdp_mediadir_role_e role)
+{
+    if (role >= SDP_MEDIADIR_ROLE_UNKNOWN) {
+        return ("Invalid media direction role");
+    } else {
+        return (sdp_mediadir_role[role].name);
+    }
+}
+
+
+/* Return the printable name for a b= bandwidth modifier. */
+const char *sdp_get_bw_modifier_name (sdp_bw_modifier_e bw_modifier_type)
+{
+    if (bw_modifier_type == SDP_BW_MODIFIER_UNSUPPORTED) {
+        return SDP_UNSUPPORTED;
+    }
+    if ((bw_modifier_type >= SDP_BW_MODIFIER_AS) &&
+        (bw_modifier_type < SDP_MAX_BW_MODIFIER_VAL)) {
+        return sdp_bw_modifier_val[bw_modifier_type].name;
+    }
+    return "Invalid bw modifier type";
+}
+
+/* Return the printable name for an a=group: semantics value. */
+const char *sdp_get_group_attr_name (sdp_group_attr_e group_attr_type)
+{
+    if (group_attr_type == SDP_GROUP_ATTR_UNSUPPORTED) {
+        return SDP_UNSUPPORTED;
+    }
+    return (group_attr_type < SDP_MAX_GROUP_ATTR_VAL)
+               ? sdp_group_attr_val[group_attr_type].name
+               : "Invalid a=group: attribute type";
+}
+
+/* Return the printable name for a source filter mode. */
+const char *sdp_get_src_filter_mode_name (sdp_src_filter_mode_e type)
+{
+    return (type < SDP_MAX_FILTER_MODE) ? sdp_src_filter_mode_val[type].name
+                                        : "Invalid source filter mode";
+}
+
+/* Return the printable name for an RTCP unicast mode. */
+const char *sdp_get_rtcp_unicast_mode_name (sdp_rtcp_unicast_mode_e type)
+{
+    return (type < SDP_RTCP_MAX_UNICAST_MODE)
+               ? sdp_rtcp_unicast_mode_val[type].name
+               : "Invalid rtcp unicast mode";
+}
+
+/* Function: sdp_init_description
+ * Description: Allocates a new SDP structure that can be used for either
+ * parsing or building an SDP description. This routine
+ * saves the config pointer passed in the SDP structure so
+ * SDP will know how to parse/build based on the options defined.
+ * An SDP structure must be allocated before parsing or building
+ * since the handle must be passed to these routines.
+ * Parameters: config_p The config handle returned by sdp_init_config
+ * Returns: A handle for a new SDP structure as a void ptr.
+ * NULL on allocation failure or NULL config.
+*/
+sdp_t *sdp_init_description (sdp_conf_options_t *conf_p)
+{
+ int i;
+ sdp_t *sdp_p;
+
+ if (!conf_p) {
+ return (NULL);
+ }
+
+ /* SDP_MALLOC is calloc(1, x) (see sdp_os_defs.h), so the structure
+ * starts zero-filled; the explicit field initialization below is
+ * defensive/documentary. */
+ sdp_p = (sdp_t *)SDP_MALLOC(sizeof(sdp_t));
+ if (sdp_p == NULL) {
+ return (NULL);
+ }
+
+ /* Session-level (o=, s=, i=, u=) state. */
+ sdp_p->conf_p = conf_p;
+ sdp_p->version = SDP_CURRENT_VERSION;
+ sdp_p->owner_name[0] = '\0';
+ sdp_p->owner_sessid[0] = '\0';
+ sdp_p->owner_version[0] = '\0';
+ sdp_p->owner_network_type = SDP_NT_INVALID;
+ sdp_p->owner_addr_type = SDP_AT_INVALID;
+ sdp_p->owner_addr[0] = '\0';
+ sdp_p->sessname[0] = '\0';
+ sdp_p->sessinfo_found = FALSE;
+ sdp_p->uri_found = FALSE;
+
+ /* Session-level c= (connection) defaults. */
+ sdp_p->default_conn.nettype = SDP_NT_INVALID;
+ sdp_p->default_conn.addrtype = SDP_AT_INVALID;
+ sdp_p->default_conn.conn_addr[0] = '\0';
+ sdp_p->default_conn.is_multicast = FALSE;
+ sdp_p->default_conn.ttl = 0;
+ sdp_p->default_conn.num_of_addresses = 0;
+
+ sdp_p->bw.bw_data_count = 0;
+ sdp_p->bw.bw_data_list = NULL;
+
+ /* Linked lists populated during parsing. */
+ sdp_p->timespec_p = NULL;
+ sdp_p->sess_attrs_p = NULL;
+ sdp_p->mca_p = NULL;
+ sdp_p->mca_count = 0;
+
+ /* Set default debug flags from application config. */
+ for (i=0; i < SDP_MAX_DEBUG_TYPES; i++) {
+ sdp_p->debug_flag[i] = conf_p->debug_flag[i];
+ }
+
+ return (sdp_p);
+}
+
+
+/* Function: sdp_debug
+ * Description: Enable or disable one debug category (ERRORS, WARNINGS,
+ * or TRACE) for this particular SDP structure. Each SDP
+ * starts with the application-level defaults and may be
+ * adjusted at any time.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * debug_type The debug category being enabled/disabled.
+ * debug_flag Whether the category should be enabled.
+ * Returns: Nothing. Silently ignores a NULL handle or an
+ * out-of-range debug type.
+ */
+void sdp_debug (sdp_t *sdp_p, sdp_debug_e debug_type, tinybool debug_flag)
+{
+    if (sdp_p && (debug_type < SDP_MAX_DEBUG_TYPES)) {
+        sdp_p->debug_flag[debug_type] = debug_flag;
+    }
+}
+
+
+/* Function: sdp_set_string_debug
+ * Description: Set the prefix string printed with every debug message
+ * for this SDP. The string is copied into the SDP structure,
+ * so the library does not keep a reference to the caller's
+ * memory.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description.
+ * debug_str String to prepend to every debug message.
+ * Returns: Nothing. Silently ignores a NULL handle.
+ */
+void sdp_set_string_debug (sdp_t *sdp_p, const char *debug_str)
+{
+    if (sdp_p) {
+        sstrncpy(sdp_p->debug_str, debug_str, sizeof(sdp_p->debug_str));
+    }
+}
+
+
+/* Function: sdp_validate_sdp
+ * Description: Validate an SDP structure.
+ * Checks that c= is present at the session level or at every
+ * m= level, then that o=, s= and t= are present when the
+ * config flags (owner_reqd, session_name_reqd, timespec_reqd)
+ * require them.
+ * Parameters: sdp_p The SDP handle of the struct to validate.
+ * Returns: SDP_SUCCESS, or SDP_FAILURE on the first missing
+ * required line (an error is also reported via
+ * sdp_parse_error).
+ */
+sdp_result_e sdp_validate_sdp (sdp_t *sdp_p)
+{
+ int i;
+ uint16_t num_media_levels;
+
+ /* Need to validate c= info is specified at session level or
+ * at all m= levels.
+ */
+ if (sdp_connection_valid((void *)sdp_p, SDP_SESSION_LEVEL) == FALSE) {
+ /* No session-level c=; every media level must then carry its own. */
+ num_media_levels = sdp_get_num_media_lines((void *)sdp_p);
+ for (i=1; i <= num_media_levels; i++) {
+ if (sdp_connection_valid((void *)sdp_p, (unsigned short)i) == FALSE) {
+ sdp_parse_error(sdp_p,
+ "%s c= connection line not specified for "
+ "every media level, validation failed.",
+ sdp_p->debug_str);
+ return (SDP_FAILURE);
+ }
+ }
+ }
+
+ /* Validate required lines were specified */
+ if ((sdp_owner_valid((void *)sdp_p) == FALSE) &&
+ (sdp_p->conf_p->owner_reqd == TRUE)) {
+ sdp_parse_error(sdp_p,
+ "%s o= owner line not specified, validation failed.",
+ sdp_p->debug_str);
+ return (SDP_FAILURE);
+ }
+
+ if ((sdp_session_name_valid((void *)sdp_p) == FALSE) &&
+ (sdp_p->conf_p->session_name_reqd == TRUE)) {
+ sdp_parse_error(sdp_p,
+ "%s s= session name line not specified, validation failed.",
+ sdp_p->debug_str);
+ return (SDP_FAILURE);
+ }
+
+ if ((sdp_timespec_valid((void *)sdp_p) == FALSE) &&
+ (sdp_p->conf_p->timespec_reqd == TRUE)) {
+ sdp_parse_error(sdp_p,
+ "%s t= timespec line not specified, validation failed.",
+ sdp_p->debug_str);
+ return (SDP_FAILURE);
+ }
+
+ return (SDP_SUCCESS);
+}
+
+/* Function: sdp_parse
+ * Description: Parse an SDP description in the specified buffer.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description
+ * bufp Pointer to the buffer containing the SDP
+ * description to parse.
+ * len The length of the buffer.
+ * Returns: A result value indicating if the parse was successful and
+ * if not, what type of error was encountered. The
+ * information from the parse is stored in the sdp_p structure.
+ */
+sdp_result_e sdp_parse (sdp_t *sdp_p, const char *buf, size_t len)
+{
+ uint8_t i;
+ uint16_t cur_level = SDP_SESSION_LEVEL;
+ const char *ptr;
+ const char *next_ptr = NULL;
+ char *line_end;
+ sdp_token_e last_token = SDP_TOKEN_V;
+ sdp_result_e result = SDP_SUCCESS;
+ tinybool parse_done = FALSE;
+ tinybool end_found = FALSE;
+ tinybool first_line = TRUE;
+ tinybool unrec_token = FALSE;
+ /* Kept as a pointer-to-pointer so the historic *bufp code below
+ * still compiles; the final *bufp write updates only this local. */
+ const char **bufp = &buf;
+
+ if (!sdp_p) {
+ return (SDP_INVALID_SDP_PTR);
+ }
+
+ if ((bufp == NULL) || (*bufp == NULL)) {
+ return (SDP_NULL_BUF_PTR);
+ }
+
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Trace SDP Parse:", sdp_p->debug_str);
+ }
+
+ next_ptr = *bufp;
+ sdp_p->conf_p->num_parses++;
+
+ /* Initialize the last valid capability instance to zero. Used
+ * to help in parsing X-cpar attrs. */
+ sdp_p->cap_valid = FALSE;
+ sdp_p->last_cap_inst = 0;
+
+ /* Line counter used in error reports (1-based after first increment). */
+ sdp_p->parse_line = 0;
+
+ /* We want to try to find the end of the SDP description, even if
+ * we find a parsing error.
+ */
+ while (!end_found) {
+ /* If the last char of this line goes beyond the end of the buffer,
+ * we don't parse it.
+ */
+ ptr = next_ptr;
+ sdp_p->parse_line++;
+ line_end = sdp_findchar(ptr, "\n");
+ if ((line_end >= (*bufp + len)) ||
+ (*line_end == '\0')) {
+ /* As this does not update the result value the SDP up to this point
+ * is still accept as valid. So encountering this is not treated as
+ * an error.
+ */
+ sdp_parse_error(sdp_p,
+ "%s End of line beyond end of buffer.",
+ sdp_p->debug_str);
+ CSFLogError(logTag, "SDP: Invalid SDP, no \\n (len %u): %*s",
+ (unsigned)len, (int)len, *bufp);
+ end_found = TRUE;
+ break;
+ }
+
+ /* Print the line if we're tracing. */
+ if ((parse_done == FALSE) &&
+ (sdp_p->debug_flag[SDP_DEBUG_TRACE])) {
+ SDP_PRINT("%s ", sdp_p->debug_str);
+
+ SDP_PRINT("%*s", (int)(line_end - ptr), ptr);
+
+ }
+
+ /* Find out which token this line has, if any. */
+ /* Compares the "x=" prefix (SDP_TOKEN_LEN chars) against each
+ * known token; i == SDP_MAX_TOKENS afterwards means no match. */
+ for (i=0; i < SDP_MAX_TOKENS; i++) {
+ if (strncmp(ptr, sdp_token[i].name, SDP_TOKEN_LEN) == 0) {
+ break;
+ }
+ }
+ if (i == SDP_MAX_TOKENS) {
+ /* See if the second char on the next line is an '=' char.
+ * If so, we note this as an unrecognized token line. */
+ if (ptr[1] == '=') {
+ unrec_token = TRUE;
+ }
+ if (first_line == TRUE) {
+ sdp_parse_error(sdp_p,
+ "%s Attempt to parse text not recognized as "
+ "SDP text, parse fails.", sdp_p->debug_str);
+ /* If we haven't already printed out the line we
+ * were trying to parse, do it now.
+ */
+ if (!sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s ", sdp_p->debug_str);
+ SDP_PRINT("%*s", (int)(line_end - ptr), ptr);
+ }
+ sdp_p->conf_p->num_not_sdp_desc++;
+ return (SDP_NOT_SDP_DESCRIPTION);
+ } else {
+ /* Unrecognized line after valid lines: stop, don't fail. */
+ end_found = TRUE;
+ break;
+ }
+ }
+
+ /* This is the beginning of a new SDP description. */
+ if ((first_line != TRUE) && (i == SDP_TOKEN_V)) {
+ end_found = TRUE;
+ break;
+ }
+
+ /* Advance the next ptr to one char beyond the end of the line. */
+ next_ptr = line_end + 1;
+ if (next_ptr >= (*bufp + len)) {
+ end_found = TRUE;
+ }
+
+ /* If we've finished parsing and are just looking for the end of
+ * the SDP description, we don't need to do anything else here.
+ */
+ if (parse_done == TRUE) {
+ continue;
+ }
+
+ /* Only certain tokens are valid at the media level. */
+ if (cur_level != SDP_SESSION_LEVEL) {
+ if ((i != SDP_TOKEN_I) && (i != SDP_TOKEN_C) &&
+ (i != SDP_TOKEN_B) && (i != SDP_TOKEN_K) &&
+ (i != SDP_TOKEN_A) && (i != SDP_TOKEN_M)) {
+ sdp_p->conf_p->num_invalid_token_order++;
+ sdp_parse_error(sdp_p,
+ "%s Warning: Invalid token %s found at media level",
+ sdp_p->debug_str, sdp_token[i].name);
+ /* Warning only: the line is skipped, not fatal. */
+ continue;
+ }
+ }
+
+ /* Verify the token ordering. */
+ if (first_line == TRUE) {
+ if (i != SDP_TOKEN_V) {
+ if (sdp_p->conf_p->version_reqd == TRUE) {
+ sdp_parse_error(sdp_p,
+ "%s First line not v=, parse fails",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_token_order++;
+ result = SDP_INVALID_TOKEN_ORDERING;
+ parse_done = TRUE;
+ } else {
+ last_token = (sdp_token_e)i;
+ }
+ } else {
+ last_token = (sdp_token_e)i;
+ }
+ first_line = FALSE;
+ } else {
+ /* Out-of-order tokens are warned about but still parsed. */
+ if (i < last_token) {
+ sdp_p->conf_p->num_invalid_token_order++;
+ sdp_parse_error(sdp_p,
+ "%s Warning: Invalid token ordering detected, "
+ "token %s found after token %s", sdp_p->debug_str,
+ sdp_token[i].name, sdp_token[last_token].name);
+ }
+ }
+
+ /* Finally parse the line. */
+ /* Skip past the "x=" prefix and dispatch to the per-token parser. */
+ ptr += SDP_TOKEN_LEN;
+ result = sdp_token[i].parse_func(sdp_p, cur_level, (const char *)ptr);
+ last_token = (sdp_token_e)i;
+ if (last_token == SDP_TOKEN_M) {
+ /* Each m= line opens a new media level (1-based). */
+ if (cur_level == SDP_SESSION_LEVEL) {
+ cur_level = 1;
+ } else {
+ cur_level++;
+ }
+ /* The token ordering can start again at i= */
+ last_token = (sdp_token_e)(SDP_TOKEN_I - 1);
+ }
+ if (result != SDP_SUCCESS) {
+ /* Remember the failure but keep scanning for the SDP's end. */
+ parse_done = TRUE;
+ }
+
+ /* Skip the new line char at the end of this line and see if
+ * this is the end of the buffer.
+ */
+ if ((line_end + 1) == (*bufp + len)) {
+ end_found = TRUE;
+ }
+ }
+
+ /* If we found no valid lines, return an error. */
+ if (first_line == TRUE) {
+ sdp_p->conf_p->num_not_sdp_desc++;
+ return (SDP_NOT_SDP_DESCRIPTION);
+ }
+
+ /* If no errors were found yet, validate the overall sdp. */
+ if (result == SDP_SUCCESS) {
+ result = sdp_validate_sdp(sdp_p);
+ }
+ /* Return the pointer where we left off. */
+ *bufp = next_ptr;
+ /* If the SDP is valid, but the next line following was an
+ * unrecognized <token>= line, indicate this on the return. */
+ if ((result == SDP_SUCCESS) && (unrec_token == TRUE)) {
+ return (SDP_UNRECOGNIZED_TOKEN);
+ } else {
+ return (result);
+ }
+}
+
+
+/* Function: sdp_build
+ * Description: Build an SDP description in the specified buffer based
+ * on the information in the given SDP structure.
+ * Emits the session-level tokens v= through (not including)
+ * m=, then for each media level emits m= followed by the
+ * media-valid tokens.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description
+ * fs A flex_string where the SDP description should be built.
+ * Returns: A result value indicating if the build was successful and
+ * if not, what type of error was encountered - e.g.,
+ * description was too long for the given buffer.
+ */
+sdp_result_e sdp_build (sdp_t *sdp_p, flex_string *fs)
+{
+ int i, j;
+ sdp_result_e result = SDP_SUCCESS;
+
+ if (!sdp_p) {
+ return (SDP_INVALID_SDP_PTR);
+ }
+
+ if (!fs) {
+ return (SDP_NULL_BUF_PTR);
+ }
+
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Trace SDP Build:", sdp_p->debug_str);
+ }
+
+ sdp_p->conf_p->num_builds++;
+
+ /* Session-level tokens: everything before the first m=. */
+ for (i=0; ((i < SDP_TOKEN_M) &&
+ (result == SDP_SUCCESS)); i++) {
+ result = sdp_token[i].build_func(sdp_p, SDP_SESSION_LEVEL, fs);
+ /* ok not to check buffer space (yet) as the if() checks it */
+ }
+ /* If the session level was ok, build the media lines. */
+ if (result == SDP_SUCCESS) {
+ /* Media levels are 1-based, one per mca entry. */
+ for (i=1; ((i <= sdp_p->mca_count) &&
+ (result == SDP_SUCCESS)); i++) {
+ result = sdp_token[SDP_TOKEN_M].build_func(sdp_p, (uint16_t)i, fs);
+
+ /* ok not to check buffer space (yet) as the for() checks it */
+ for (j=SDP_TOKEN_I;
+ ((j < SDP_TOKEN_M) && (result == SDP_SUCCESS));
+ j++) {
+ if ((j == SDP_TOKEN_U) || (j == SDP_TOKEN_E) ||
+ (j == SDP_TOKEN_P) || (j == SDP_TOKEN_T) ||
+ (j == SDP_TOKEN_R) || (j == SDP_TOKEN_Z)) {
+ /* These tokens not valid at media level. */
+ continue;
+ }
+ result = sdp_token[j].build_func(sdp_p, (uint16_t)i, fs);
+ /* ok not to check buffer space (yet) as the for() checks it */
+ }
+ }
+ }
+
+ return (result);
+}
+
+/* Function: sdp_free_description
+ * Description: Free an SDP description and all memory associated with it.
+ * Note that this also releases the config structure
+ * (sdp_free_config on sdp_p->conf_p) that was passed to
+ * sdp_init_description, so the caller must not reuse it.
+ * Parameters: sdp_p The SDP handle returned by sdp_init_description
+ * Returns: A result value indicating if the free was successful and
+ * if not, what type of error was encountered - e.g., sdp_p
+ * was invalid and didn't point to an SDP structure.
+*/
+sdp_result_e sdp_free_description (sdp_t *sdp_p)
+{
+ sdp_timespec_t *time_p, *next_time_p;
+ sdp_attr_t *attr_p, *next_attr_p;
+ sdp_mca_t *mca_p, *next_mca_p;
+ sdp_bw_t *bw_p;
+ sdp_bw_data_t *bw_data_p;
+
+ if (!sdp_p) {
+ return (SDP_INVALID_SDP_PTR);
+ }
+
+ /* Free the config structure */
+ sdp_free_config(sdp_p->conf_p);
+
+ /* Free any timespec structures - should be only one since
+ * this is all we currently support.
+ */
+ time_p = sdp_p->timespec_p;
+ while (time_p != NULL) {
+ next_time_p = time_p->next_p;
+ SDP_FREE(time_p);
+ time_p = next_time_p;
+ }
+
+ /* Free the session-level b= bandwidth list. */
+ bw_p = &(sdp_p->bw);
+ bw_data_p = bw_p->bw_data_list;
+ while (bw_data_p != NULL) {
+ bw_p->bw_data_list = bw_data_p->next_p;
+ SDP_FREE(bw_data_p);
+ bw_data_p = bw_p->bw_data_list;
+ }
+
+ /* Free any session attr structures */
+ attr_p = sdp_p->sess_attrs_p;
+ while (attr_p != NULL) {
+ next_attr_p = attr_p->next_p;
+ sdp_free_attr(attr_p);
+ attr_p = next_attr_p;
+ }
+
+ /* Free any mca structures */
+ mca_p = sdp_p->mca_p;
+ while (mca_p != NULL) {
+ next_mca_p = mca_p->next_p;
+
+ /* Free any media attr structures */
+ attr_p = mca_p->media_attrs_p;
+ while (attr_p != NULL) {
+ next_attr_p = attr_p->next_p;
+ sdp_free_attr(attr_p);
+ attr_p = next_attr_p;
+ }
+
+ /* Free the media profiles struct if allocated. */
+ if (mca_p->media_profiles_p != NULL) {
+ SDP_FREE(mca_p->media_profiles_p);
+ }
+
+ /* Free the media-level b= bandwidth list. */
+ bw_p = &(mca_p->bw);
+ bw_data_p = bw_p->bw_data_list;
+ while (bw_data_p != NULL) {
+ bw_p->bw_data_list = bw_data_p->next_p;
+ SDP_FREE(bw_data_p);
+ bw_data_p = bw_p->bw_data_list;
+ }
+
+ SDP_FREE(mca_p);
+ mca_p = next_mca_p;
+ }
+
+ SDP_FREE(sdp_p);
+
+ return (SDP_SUCCESS);
+}
+
+/*
+ * sdp_parse_error
+ * Send SDP parsing errors to log and up to peerconnection
+ * Formats the printf-style message into a flex_string, logs it with the
+ * current parse line number, and forwards it to the application's
+ * error_handler callback if one was registered in the config.
+ * Note: sdp and sdp->conf_p are dereferenced without a NULL check;
+ * callers must pass a valid handle.
+ */
+void sdp_parse_error(sdp_t* sdp, const char *format, ...) {
+ flex_string fs;
+ va_list ap;
+
+ flex_string_init(&fs);
+
+ va_start(ap, format);
+ flex_string_vsprintf(&fs, format, ap);
+ va_end(ap);
+
+ CSFLogError("SDP Parse", "SDP Parse Error %s, line %u", fs.buffer,
+ sdp->parse_line);
+
+ if (sdp->conf_p->error_handler) {
+ sdp->conf_p->error_handler(sdp->conf_p->error_handler_context,
+ sdp->parse_line,
+ fs.buffer);
+ }
+
+ flex_string_free(&fs);
+}
diff --git a/media/webrtc/signaling/src/sdp/sipcc/sdp_os_defs.h b/media/webrtc/signaling/src/sdp/sipcc/sdp_os_defs.h
new file mode 100644
index 000000000..6e4dd8c64
--- /dev/null
+++ b/media/webrtc/signaling/src/sdp/sipcc/sdp_os_defs.h
@@ -0,0 +1,27 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* OS/portability shims for the sipcc SDP library: allocation macros,
+ * logging, and small integer typedefs. */
+#ifndef _SDP_OS_DEFS_H_
+#define _SDP_OS_DEFS_H_
+
+#include <stdlib.h>
+
+#include "cpr_types.h"
+#include "cpr_string.h"
+
+
+/* All SDP_PRINT output goes through CSFLog at error level. */
+#define SDP_PRINT(format, ...) CSFLogError("sdp" , format , ## __VA_ARGS__ )
+
+/* Use operating system malloc */
+/* Note: calloc, so SDP_MALLOC'd memory is always zero-filled. */
+#define SDP_MALLOC(x) calloc(1, (x))
+#define SDP_FREE free
+
+typedef uint8_t tinybool;
+typedef unsigned short ushort;
+typedef unsigned long ulong;
+/* For compilers without C99/GNU inline semantics, compile `inline`
+ * away entirely. */
+#ifndef __GNUC_STDC_INLINE__
+#define inline
+#endif
+
+#endif /* _SDP_OS_DEFS_H_ */
diff --git a/media/webrtc/signaling/src/sdp/sipcc/sdp_private.h b/media/webrtc/signaling/src/sdp/sipcc/sdp_private.h
new file mode 100644
index 000000000..a98f4b119
--- /dev/null
+++ b/media/webrtc/signaling/src/sdp/sipcc/sdp_private.h
@@ -0,0 +1,364 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _SIPCC_SDP_PRIVATE_H_
+#define _SIPCC_SDP_PRIVATE_H_
+
+
+#include "sdp.h"
+
+extern const sdp_attrarray_t sdp_attr[];
+extern const sdp_namearray_t sdp_media[];
+extern const sdp_namearray_t sdp_nettype[];
+extern const sdp_namearray_t sdp_addrtype[];
+extern const sdp_namearray_t sdp_transport[];
+extern const sdp_namearray_t sdp_encrypt[];
+extern const sdp_namearray_t sdp_payload[];
+extern const sdp_namearray_t sdp_t38_rate[];
+extern const sdp_namearray_t sdp_t38_udpec[];
+extern const sdp_namearray_t sdp_qos_strength[];
+extern const sdp_namearray_t sdp_qos_direction[];
+extern const sdp_namearray_t sdp_qos_status_type[];
+extern const sdp_namearray_t sdp_curr_type[];
+extern const sdp_namearray_t sdp_des_type[];
+extern const sdp_namearray_t sdp_conf_type[];
+extern const sdp_namearray_t sdp_mediadir_role[];
+extern const sdp_namearray_t sdp_fmtp_codec_param[];
+extern const sdp_namearray_t sdp_fmtp_codec_param_val[];
+extern const sdp_namearray_t sdp_silencesupp_pref[];
+extern const sdp_namearray_t sdp_silencesupp_siduse[];
+extern const sdp_namearray_t sdp_srtp_context_crypto_suite[];
+extern const sdp_namearray_t sdp_bw_modifier_val[];
+extern const sdp_namearray_t sdp_group_attr_val[];
+extern const sdp_namearray_t sdp_src_filter_mode_val[];
+extern const sdp_namearray_t sdp_rtcp_unicast_mode_val[];
+extern const sdp_namearray_t sdp_rtcp_fb_type_val[];
+extern const sdp_namearray_t sdp_rtcp_fb_nack_type_val[];
+extern const sdp_namearray_t sdp_rtcp_fb_ack_type_val[];
+extern const sdp_namearray_t sdp_rtcp_fb_ccm_type_val[];
+extern const sdp_namearray_t sdp_setup_type_val[];
+extern const sdp_namearray_t sdp_connection_type_val[];
+
+
+extern const sdp_srtp_crypto_suite_list sdp_srtp_crypto_suite_array[];
+/* Function Prototypes */
+
+/* sdp_access.c */
+extern sdp_mca_t *sdp_find_media_level(sdp_t *sdp_p, uint16_t level);
+extern sdp_bw_data_t* sdp_find_bw_line (sdp_t *sdp_ptr, uint16_t level, uint16_t inst_num);
+
+/* sdp_attr.c */
+extern sdp_result_e
+sdp_build_attr_fmtp_params (sdp_t *sdp_p, sdp_fmtp_t *attr_p, flex_string *fs);
+
+extern sdp_result_e sdp_parse_attribute(sdp_t *sdp_p, uint16_t level,
+ const char *ptr);
+extern sdp_result_e sdp_parse_attr_simple_string(sdp_t *sdp_p,
+ sdp_attr_t *attr_p, const char *ptr);
+extern sdp_result_e sdp_build_attr_simple_string(sdp_t *sdp_p,
+ sdp_attr_t *attr_p, flex_string *fs);
+extern sdp_result_e sdp_parse_attr_simple_u32(sdp_t *sdp_p,
+ sdp_attr_t *attr_p, const char *ptr);
+extern sdp_result_e sdp_build_attr_simple_u32(sdp_t *sdp_p,
+ sdp_attr_t *attr_p, flex_string *fs);
+extern sdp_result_e sdp_parse_attr_simple_bool(sdp_t *sdp_p,
+ sdp_attr_t *attr_p, const char *ptr);
+extern sdp_result_e sdp_build_attr_simple_bool(sdp_t *sdp_p,
+ sdp_attr_t *attr_p, flex_string *fs);
+extern sdp_result_e sdp_parse_attr_maxprate(sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr);
+extern sdp_result_e sdp_parse_attr_fmtp(sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr);
+extern sdp_result_e sdp_build_attr_fmtp(sdp_t *sdp_p, sdp_attr_t *attr_p,
+ flex_string *fs);
+extern sdp_result_e sdp_parse_attr_sctpmap(sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr);
+extern sdp_result_e sdp_build_attr_sctpmap(sdp_t *sdp_p, sdp_attr_t *attr_p,
+ flex_string *fs);
+extern sdp_result_e sdp_parse_attr_msid(sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr);
+extern sdp_result_e sdp_build_attr_msid(sdp_t *sdp_p, sdp_attr_t *attr_p,
+ flex_string *fs);
+extern sdp_result_e sdp_parse_attr_msid_semantic(sdp_t *sdp_p,
+ sdp_attr_t *attr_p,
+ const char *ptr);
+extern sdp_result_e sdp_build_attr_msid_semantic(sdp_t *sdp_p,
+ sdp_attr_t *attr_p,
+ flex_string *fs);
+extern sdp_result_e sdp_parse_attr_ssrc(sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr);
+extern sdp_result_e sdp_build_attr_ssrc(sdp_t *sdp_p, sdp_attr_t *attr_p,
+ flex_string *fs);
+extern sdp_result_e sdp_parse_attr_direction(sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr);
+extern sdp_result_e sdp_build_attr_direction(sdp_t *sdp_p, sdp_attr_t *attr_p,
+ flex_string *fs);
+extern sdp_result_e sdp_parse_attr_qos(sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr);
+extern sdp_result_e sdp_build_attr_qos(sdp_t *sdp_p, sdp_attr_t *attr_p,
+ flex_string *fs);
+extern sdp_result_e sdp_parse_attr_curr(sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr);
+extern sdp_result_e sdp_build_attr_curr (sdp_t *sdp_p, sdp_attr_t *attr_p,
+ flex_string *fs);
+extern sdp_result_e sdp_parse_attr_des(sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr);
+extern sdp_result_e sdp_build_attr_des (sdp_t *sdp_p, sdp_attr_t *attr_p,
+ flex_string *fs);
+extern sdp_result_e sdp_parse_attr_conf(sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr);
+extern sdp_result_e sdp_build_attr_conf (sdp_t *sdp_p, sdp_attr_t *attr_p,
+ flex_string *fs);
+extern sdp_result_e sdp_parse_attr_transport_map(sdp_t *sdp_p,
+ sdp_attr_t *attr_p, const char *ptr);
+extern sdp_result_e sdp_build_attr_transport_map(sdp_t *sdp_p,
+ sdp_attr_t *attr_p, flex_string *fs);
+extern sdp_result_e sdp_parse_attr_subnet(sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr);
+extern sdp_result_e sdp_build_attr_subnet(sdp_t *sdp_p, sdp_attr_t *attr_p,
+ flex_string *fs);
+extern sdp_result_e sdp_parse_attr_t38_ratemgmt(sdp_t *sdp_p,
+ sdp_attr_t *attr_p, const char *ptr);
+extern sdp_result_e sdp_build_attr_t38_ratemgmt(sdp_t *sdp_p,
+ sdp_attr_t *attr_p, flex_string *fs);
+extern sdp_result_e sdp_parse_attr_t38_udpec(sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr);
+extern sdp_result_e sdp_build_attr_t38_udpec(sdp_t *sdp_p, sdp_attr_t *attr_p,
+ flex_string *fs);
+extern sdp_result_e sdp_parse_attr_cap(sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr);
+extern sdp_result_e sdp_build_attr_cap(sdp_t *sdp_p, sdp_attr_t *attr_p,
+ flex_string *fs);
+extern sdp_result_e sdp_parse_attr_cpar(sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr);
+extern sdp_result_e sdp_build_attr_cpar(sdp_t *sdp_p, sdp_attr_t *attr_p,
+ flex_string *fs);
+extern sdp_result_e sdp_parse_attr_pc_codec(sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr);
+extern sdp_result_e sdp_build_attr_pc_codec(sdp_t *sdp_p, sdp_attr_t *attr_p,
+ flex_string *fs);
+extern sdp_result_e sdp_parse_attr_xcap(sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr);
+extern sdp_result_e sdp_build_attr_xcap(sdp_t *sdp_p, sdp_attr_t *attr_p,
+ flex_string *fs);
+extern sdp_result_e sdp_parse_attr_xcpar(sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr);
+extern sdp_result_e sdp_build_attr_xcpar(sdp_t *sdp_p, sdp_attr_t *attr_p,
+ flex_string *fs);
+extern sdp_result_e sdp_parse_attr_rtcp(sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr);
+extern sdp_result_e sdp_build_attr_rtcp(sdp_t *sdp_p, sdp_attr_t *attr_p,
+ flex_string *fs);
+extern sdp_result_e sdp_parse_attr_rtr(sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr);
+extern sdp_result_e sdp_build_attr_rtr(sdp_t *sdp_p, sdp_attr_t *attr_p,
+ flex_string *fs);
+extern sdp_result_e sdp_parse_attr_comediadir(sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr);
+extern sdp_result_e sdp_build_attr_comediadir(sdp_t *sdp_p, sdp_attr_t *attr_p,
+ flex_string *fs);
+extern sdp_result_e sdp_parse_attr_silencesupp(sdp_t *sdp_p,
+ sdp_attr_t *attr_p,
+ const char *ptr);
+extern sdp_result_e sdp_build_attr_silencesupp(sdp_t *sdp_p,
+ sdp_attr_t *attr_p,
+ flex_string *fs);
+extern sdp_result_e sdp_parse_attr_srtpcontext(sdp_t *sdp_p,
+ sdp_attr_t *attr_p,
+ const char *ptr);
+extern sdp_result_e sdp_build_attr_srtpcontext(sdp_t *sdp_p,
+ sdp_attr_t *attr_p,
+ flex_string *fs);
+extern sdp_result_e sdp_parse_attr_rtcp_fb(sdp_t *sdp_p,
+ sdp_attr_t *attr_p,
+ const char *ptr);
+extern sdp_result_e sdp_build_attr_rtcp_fb(sdp_t *sdp_p,
+ sdp_attr_t *attr_p,
+ flex_string *fs);
+extern sdp_result_e sdp_parse_attr_setup(sdp_t *sdp_p,
+ sdp_attr_t *attr_p,
+ const char *ptr);
+extern sdp_result_e sdp_build_attr_setup(sdp_t *sdp_p,
+ sdp_attr_t *attr_p,
+ flex_string *fs);
+extern sdp_result_e sdp_parse_attr_connection(sdp_t *sdp_p,
+ sdp_attr_t *attr_p,
+ const char *ptr);
+extern sdp_result_e sdp_build_attr_connection(sdp_t *sdp_p,
+ sdp_attr_t *attr_p,
+ flex_string *fs);
+extern sdp_result_e sdp_parse_attr_extmap(sdp_t *sdp_p,
+ sdp_attr_t *attr_p,
+ const char *ptr);
+extern sdp_result_e sdp_build_attr_extmap(sdp_t *sdp_p,
+ sdp_attr_t *attr_p,
+ flex_string *fs);
+extern sdp_result_e sdp_parse_attr_mptime(
+ sdp_t *sdp_p, sdp_attr_t *attr_p, const char *ptr);
+extern sdp_result_e sdp_build_attr_mptime(
+ sdp_t *sdp_p, sdp_attr_t *attr_p, flex_string *fs);
+extern sdp_result_e sdp_parse_attr_x_sidin(
+ sdp_t *sdp_p, sdp_attr_t *attr_p, const char *ptr);
+extern sdp_result_e sdp_build_attr_x_sidin(
+ sdp_t *sdp_p, sdp_attr_t *attr_p, flex_string *fs);
+
+extern sdp_result_e sdp_parse_attr_x_sidout(
+ sdp_t *sdp_p, sdp_attr_t *attr_p, const char *ptr);
+extern sdp_result_e sdp_build_attr_x_sidout(
+ sdp_t *sdp_p, sdp_attr_t *attr_p, flex_string *fs);
+
+extern sdp_result_e sdp_parse_attr_x_confid(
+ sdp_t *sdp_p, sdp_attr_t *attr_p, const char *ptr);
+extern sdp_result_e sdp_build_attr_x_confid(
+ sdp_t *sdp_p, sdp_attr_t *attr_p, flex_string *fs);
+
+extern sdp_result_e sdp_parse_attr_group(
+ sdp_t *sdp_p, sdp_attr_t *attr_p, const char *ptr);
+extern sdp_result_e sdp_build_attr_group(
+ sdp_t *sdp_p, sdp_attr_t *attr_p, flex_string *fs);
+
+extern sdp_result_e sdp_parse_attr_source_filter(
+ sdp_t *sdp_p, sdp_attr_t *attr_p, const char *ptr);
+extern sdp_result_e sdp_build_source_filter(
+ sdp_t *sdp_p, sdp_attr_t *attr_p, flex_string *fs);
+
+extern sdp_result_e sdp_parse_attr_rtcp_unicast(
+ sdp_t *sdp_p, sdp_attr_t *attr_p, const char *ptr);
+extern sdp_result_e sdp_build_attr_rtcp_unicast(
+ sdp_t *sdp_p, sdp_attr_t *attr_p, flex_string *fs);
+
+extern sdp_result_e sdp_build_attr_ice_attr (
+ sdp_t *sdp_p, sdp_attr_t *attr_p, flex_string *fs);
+extern sdp_result_e sdp_parse_attr_ice_attr (
+ sdp_t *sdp_p, sdp_attr_t *attr_p, const char *ptr);
+
+extern sdp_result_e sdp_build_attr_simple_flag (
+ sdp_t *sdp_p, sdp_attr_t *attr_p, flex_string *fs);
+extern sdp_result_e sdp_parse_attr_simple_flag (
+ sdp_t *sdp_p, sdp_attr_t *attr_p, const char *ptr);
+
+extern sdp_result_e sdp_parse_attr_complete_line (
+ sdp_t *sdp_p, sdp_attr_t *attr_p, const char *ptr);
+extern sdp_result_e sdp_parse_attr_long_line(sdp_t *sdp_p,
+ sdp_attr_t *attr_p, const char *ptr);
+extern sdp_result_e sdp_build_attr_long_line(sdp_t *sdp_p,
+ sdp_attr_t *attr_p, flex_string *fs);
+
+/* sdp_attr_access.c */
+extern void sdp_free_attr(sdp_attr_t *attr_p);
+extern sdp_result_e sdp_find_attr_list(sdp_t *sdp_p, uint16_t level, uint8_t cap_num,
+ sdp_attr_t **attr_p, char *fname);
+extern sdp_attr_t *sdp_find_attr(sdp_t *sdp_p, uint16_t level, uint8_t cap_num,
+ sdp_attr_e attr_type, uint16_t inst_num);
+extern sdp_attr_t *sdp_find_capability(sdp_t *sdp_p, uint16_t level, uint8_t cap_num);
+
+/* sdp_main.c */
+extern const char *sdp_get_attr_name(sdp_attr_e attr_type);
+extern const char *sdp_get_media_name(sdp_media_e media_type);
+extern const char *sdp_get_network_name(sdp_nettype_e network_type);
+extern const char *sdp_get_address_name(sdp_addrtype_e addr_type);
+extern const char *sdp_get_transport_name(sdp_transport_e transport_type);
+extern const char *sdp_get_encrypt_name(sdp_encrypt_type_e encrypt_type);
+extern const char *sdp_get_payload_name(sdp_payload_e payload);
+extern const char *sdp_get_t38_ratemgmt_name(sdp_t38_ratemgmt_e rate);
+extern const char *sdp_get_t38_udpec_name(sdp_t38_udpec_e udpec);
+extern const char *sdp_get_qos_strength_name(sdp_qos_strength_e strength);
+extern const char *sdp_get_qos_direction_name(sdp_qos_dir_e direction);
+extern const char *sdp_get_qos_status_type_name(sdp_qos_status_types_e status_type);
+extern const char *sdp_get_curr_type_name(sdp_curr_type_e curr_type);
+extern const char *sdp_get_des_type_name(sdp_des_type_e des_type);
+extern const char *sdp_get_conf_type_name(sdp_conf_type_e conf_type);
+extern const char *sdp_get_mediadir_role_name (sdp_mediadir_role_e role);
+extern const char *sdp_get_silencesupp_pref_name(sdp_silencesupp_pref_e pref);
+extern const char *sdp_get_silencesupp_siduse_name(sdp_silencesupp_siduse_e
+ siduse);
+
+extern const char *sdp_get_group_attr_name(sdp_group_attr_e group_attr);
+extern const char *sdp_get_src_filter_mode_name(sdp_src_filter_mode_e type);
+extern const char *sdp_get_rtcp_unicast_mode_name(sdp_rtcp_unicast_mode_e type);
+
+/* sdp_tokens.c */
+extern sdp_result_e sdp_parse_version(sdp_t *sdp_p, uint16_t token,
+ const char *ptr);
+extern sdp_result_e sdp_build_version(sdp_t *sdp_p, uint16_t token, flex_string *fs);
+extern sdp_result_e sdp_parse_owner(sdp_t *sdp_p, uint16_t token,
+ const char *ptr);
+extern sdp_result_e sdp_build_owner(sdp_t *sdp_p, uint16_t token, flex_string *fs);
+extern sdp_result_e sdp_parse_sessname(sdp_t *sdp_p, uint16_t token,
+ const char *ptr);
+extern sdp_result_e sdp_build_sessname(sdp_t *sdp_p, uint16_t token, flex_string *fs);
+extern sdp_result_e sdp_parse_sessinfo(sdp_t *sdp_p, uint16_t token,
+ const char *ptr);
+extern sdp_result_e sdp_build_sessinfo(sdp_t *sdp_p, uint16_t token, flex_string *fs);
+extern sdp_result_e sdp_parse_uri(sdp_t *sdp_p, uint16_t token, const char *ptr);
+extern sdp_result_e sdp_build_uri(sdp_t *sdp_p, uint16_t token, flex_string *fs);
+extern sdp_result_e sdp_parse_email(sdp_t *sdp_p, uint16_t token, const char *ptr);
+extern sdp_result_e sdp_build_email(sdp_t *sdp_p, uint16_t token, flex_string *fs);
+extern sdp_result_e sdp_parse_phonenum(sdp_t *sdp_p, uint16_t token,
+ const char *ptr);
+extern sdp_result_e sdp_build_phonenum(sdp_t *sdp_p, uint16_t token, flex_string *fs);
+extern sdp_result_e sdp_parse_connection(sdp_t *sdp_p, uint16_t token,
+ const char *ptr);
+extern sdp_result_e sdp_build_connection(sdp_t *sdp_p, uint16_t token, flex_string *fs);
+extern sdp_result_e sdp_parse_bandwidth(sdp_t *sdp_p, uint16_t token,
+ const char *ptr);
+extern sdp_result_e sdp_build_bandwidth(sdp_t *sdp_p, uint16_t token, flex_string *fs);
+extern sdp_result_e sdp_parse_timespec(sdp_t *sdp_p, uint16_t token,
+ const char *ptr);
+extern sdp_result_e sdp_build_timespec(sdp_t *sdp_p, uint16_t token, flex_string *fs);
+extern sdp_result_e sdp_parse_repeat_time(sdp_t *sdp_p, uint16_t token,
+ const char *ptr);
+extern sdp_result_e sdp_build_repeat_time(sdp_t *sdp_p, uint16_t token, flex_string *fs);
+extern sdp_result_e sdp_parse_timezone_adj(sdp_t *sdp_p, uint16_t token,
+ const char *ptr);
+extern sdp_result_e sdp_build_timezone_adj(sdp_t *sdp_p, uint16_t token, flex_string *fs);
+extern sdp_result_e sdp_parse_encryption(sdp_t *sdp_p, uint16_t token,
+ const char *ptr);
+extern sdp_result_e sdp_build_encryption(sdp_t *sdp_p, uint16_t token, flex_string *fs);
+extern sdp_result_e sdp_parse_media(sdp_t *sdp_p, uint16_t token, const char *ptr);
+extern sdp_result_e sdp_build_media(sdp_t *sdp_p, uint16_t token, flex_string *fs);
+extern sdp_result_e sdp_parse_attribute(sdp_t *sdp_p, uint16_t token,
+ const char *ptr);
+extern sdp_result_e sdp_build_attribute(sdp_t *sdp_p, uint16_t token, flex_string *fs);
+
+extern void sdp_parse_payload_types(sdp_t *sdp_p, sdp_mca_t *mca_p,
+ const char *ptr);
+extern sdp_result_e sdp_parse_multiple_profile_payload_types(sdp_t *sdp_p,
+ sdp_mca_t *mca_p,
+ const char *ptr);
+extern sdp_result_e
+sdp_parse_attr_sdescriptions(sdp_t *sdp_p, sdp_attr_t *attr_p,
+ const char *ptr);
+
+extern sdp_result_e
+sdp_build_attr_sdescriptions(sdp_t *sdp_p, sdp_attr_t *attr_p,
+ flex_string *fs);
+
+
+/* sdp_utils.c */
+extern sdp_mca_t *sdp_alloc_mca(uint32_t line);
+extern tinybool sdp_validate_maxprate(const char *string_parm);
+extern char *sdp_findchar(const char *ptr, char *char_list);
+extern const char *sdp_getnextstrtok(const char *str, char *tokenstr, unsigned tokenstr_len,
+ const char *delim, sdp_result_e *result);
+extern uint32_t sdp_getnextnumtok(const char *str, const char **str_end,
+ const char *delim, sdp_result_e *result);
+extern uint32_t sdp_getnextnumtok_or_null(const char *str, const char **str_end,
+ const char *delim, tinybool *null_ind,
+ sdp_result_e *result);
+extern tinybool sdp_getchoosetok(const char *str, const char **str_end,
+ const char *delim, sdp_result_e *result);
+
+extern
+tinybool verify_sdescriptions_mki(char *buf, char *mkiVal, uint16_t *mkiLen);
+
+extern
+tinybool verify_sdescriptions_lifetime(char *buf);
+
+/* sdp_services_xxx.c */
+extern void sdp_dump_buffer(char *_ptr, int _size_bytes);
+
+tinybool sdp_checkrange(sdp_t *sdp, char *num, ulong* lval);
+
+#endif /* _SDP_PRIVATE_H_ */
diff --git a/media/webrtc/signaling/src/sdp/sipcc/sdp_services_unix.c b/media/webrtc/signaling/src/sdp/sipcc/sdp_services_unix.c
new file mode 100644
index 000000000..8f43adc43
--- /dev/null
+++ b/media/webrtc/signaling/src/sdp/sipcc/sdp_services_unix.c
@@ -0,0 +1,41 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "sdp_os_defs.h"
+#include "sdp.h"
+#include "sdp_private.h"
+
+#include "CSFLog.h"
+
+/******************************************************************/
+/* Required Platform Routines */
+/* */
+/* These routines are called from the common SDP code. */
+/* They must be provided for each platform. */
+/* */
+/******************************************************************/
+
+/*
+ * sdp_dump_buffer
+ *
+ * Utility to send _size_bytes of data from the string
+ * pointed to by _ptr to the buginf function. This may make
+ * multiple buginf calls if the buffer is too large for buginf.
+ *
+ * NOTE(review): _size_bytes is currently ignored; the buffer is logged
+ * as a single NUL-terminated string -- assumes callers always pass a
+ * terminated buffer, TODO confirm.
+ */
+void sdp_dump_buffer (char * _ptr, int _size_bytes)
+{
+ CSFLogDebug("sdp", "%s", _ptr);
+}
+
+/******************************************************************/
+/* */
+/* Platform Specific Routines */
+/* */
+/* These routines are only used in this particular platform. */
+/* They are called from the required platform specific */
+/* routines provided below, not from the common SDP code. */
+/* */
+/******************************************************************/
+
+/* There are currently no platform specific routines required. */
diff --git a/media/webrtc/signaling/src/sdp/sipcc/sdp_services_win32.c b/media/webrtc/signaling/src/sdp/sipcc/sdp_services_win32.c
new file mode 100644
index 000000000..1815abbfe
--- /dev/null
+++ b/media/webrtc/signaling/src/sdp/sipcc/sdp_services_win32.c
@@ -0,0 +1,40 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "sdp_os_defs.h"
+#include "sdp.h"
+#include "sdp_private.h"
+
+#include "CSFLog.h"
+
+
+/******************************************************************/
+/* Required Platform Routines */
+/* */
+/* These routines are called from the common SDP code. */
+/* They must be provided for each platform. */
+/* */
+/******************************************************************/
+
+/*
+ * sdp_dump_buffer
+ *
+ * Utility to send _size_bytes of data from the string
+ * pointed to by _ptr to the buginf function. This may make
+ * multiple buginf calls if the buffer is too large for buginf.
+ *
+ * NOTE(review): _size_bytes is currently ignored; the buffer is logged
+ * as a single NUL-terminated string, matching the unix implementation.
+ */
+void sdp_dump_buffer (char * _ptr, int _size_bytes)
+{
+ /* Pass the buffer as a format ARGUMENT, never as the format string
+ * itself: SDP text is remote-controlled and may contain '%'
+ * conversion specifiers (format-string bug / potential crash in the
+ * previous CSFLogDebug("sdp", _ptr) form). */
+ CSFLogDebug("sdp", "%s", _ptr);
+}
+
+/******************************************************************/
+/* */
+/* Platform Specific Routines */
+/* */
+/* These routines are only used in this particular platform. */
+/* They are called from the required platform specific */
+/* routines provided below, not from the common SDP code. */
+/* */
+/******************************************************************/
+
+/* There are currently no platform specific routines required. */
diff --git a/media/webrtc/signaling/src/sdp/sipcc/sdp_token.c b/media/webrtc/signaling/src/sdp/sipcc/sdp_token.c
new file mode 100644
index 000000000..a002f9a73
--- /dev/null
+++ b/media/webrtc/signaling/src/sdp/sipcc/sdp_token.c
@@ -0,0 +1,1812 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <errno.h>
+
+#include "sdp_os_defs.h"
+#include "sdp.h"
+#include "sdp_private.h"
+
+#include "CSFLog.h"
+#include "prprf.h"
+
+static const char *logTag = "sdp_token";
+
+#define MCAST_STRING_LEN 4
+
+
+/* Parse the "v=" line. Only a version equal to SDP_CURRENT_VERSION is
+ * accepted; any other value (or a non-numeric token) is a hard parse
+ * failure that bumps num_invalid_param. */
+sdp_result_e sdp_parse_version (sdp_t *sdp_p, uint16_t level, const char *ptr)
+{
+ sdp_result_e result = SDP_FAILURE;
+
+ sdp_p->version = (uint16_t)sdp_getnextnumtok(ptr, &ptr, " \t", &result);
+ if ((result != SDP_SUCCESS) || (sdp_p->version != SDP_CURRENT_VERSION)) {
+ sdp_parse_error(sdp_p,
+ "%s Invalid version (%u) found, parse failed.",
+ sdp_p->debug_str, (unsigned)sdp_p->version);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Parse version line successful, version %u",
+ sdp_p->debug_str, (unsigned)sdp_p->version);
+ }
+ return (SDP_SUCCESS);
+}
+
+sdp_result_e sdp_build_version (sdp_t *sdp_p, uint16_t level, flex_string *fs)
+{
+ if (sdp_p->version == SDP_INVALID_VALUE) {
+ if (sdp_p->conf_p->version_reqd == TRUE) {
+ CSFLogError(logTag, "%s Invalid version for v= line, "
+ "build failed.", sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ } else {
+ /* v= line is not required. */
+ return (SDP_SUCCESS);
+ }
+ }
+
+ flex_string_sprintf(fs, "v=%u\r\n", (unsigned)sdp_p->version);
+
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Built v= version line", sdp_p->debug_str);
+ }
+ return (SDP_SUCCESS);
+}
+
+/* Verify that ptr is a non-empty string of decimal digits whose value
+ * fits in max_value. Returns SDP_SUCCESS, or SDP_INVALID_PARAMETER when
+ * any non-digit appears (including sign characters) or the value is too
+ * large. An empty string fails at the PR_sscanf step. */
+static sdp_result_e sdp_verify_unsigned(const char *ptr, uint64_t max_value)
+{
+ uint64_t numeric_value;
+ /* Checking for only numbers since PR_sscanf will ignore trailing
+ characters */
+ size_t end = strspn(ptr, "0123456789");
+
+ if (ptr[end] != '\0')
+ return SDP_INVALID_PARAMETER;
+
+ if (PR_sscanf(ptr, "%llu", &numeric_value) != 1)
+ return SDP_INVALID_PARAMETER;
+
+ if (numeric_value > max_value)
+ return SDP_INVALID_PARAMETER;
+
+ return SDP_SUCCESS;
+}
+
+/* Parse the "o=" line: owner name, session id, version, network type,
+ * address type and address, in that order. The id and version are kept
+ * as strings (they may be 64-bit) but validated numerically. Unknown or
+ * unsupported network/address types fail the parse, except that an ATM
+ * network type tolerates a missing/unsupported address type. A duplicate
+ * o= line is only a warning (the later line overwrites the earlier). */
+sdp_result_e sdp_parse_owner (sdp_t *sdp_p, uint16_t level, const char *ptr)
+{
+ int i;
+ sdp_result_e result;
+ char tmp[SDP_MAX_STRING_LEN];
+ /* The spec says this:
+
+ The numeric value of the session id
+ and version in the o line MUST be representable with a 64 bit signed
+ integer. The initial value of the version MUST be less than
+ (2**62)-1, to avoid rollovers.
+ */
+ const uint64_t max_value_sessid = ((((uint64_t) 1) << 63) - 1);
+ /* Do not check that this is 2^62 - 1; that's just the limit on
+ * the initial version, not every version number. */
+ const uint64_t max_value_version = ((((uint64_t) 1) << 63) - 1);
+
+ if (sdp_p->owner_name[0] != '\0') {
+ sdp_p->conf_p->num_invalid_token_order++;
+ sdp_parse_error(sdp_p,
+ "%s Warning: More than one o= line specified.",
+ sdp_p->debug_str);
+ }
+
+ /* Find the owner name. */
+ ptr = sdp_getnextstrtok(ptr, sdp_p->owner_name, sizeof(sdp_p->owner_name), " \t", &result);
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s No owner name specified for o=.",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ /* Find the owner session id. This is a numeric field but is
+ * stored as a string since it may be 64 bit.
+ */
+ ptr = sdp_getnextstrtok(ptr, sdp_p->owner_sessid, sizeof(sdp_p->owner_sessid), " \t", &result);
+ if (result == SDP_SUCCESS) {
+ /* Make sure the sessid is numeric, even though we store it as
+ * a string.
+ */
+ result = sdp_verify_unsigned(sdp_p->owner_sessid, max_value_sessid);
+ }
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s Invalid owner session id specified for o=.",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ /* Find the owner version. */
+ ptr = sdp_getnextstrtok(ptr, sdp_p->owner_version, sizeof(sdp_p->owner_version), " \t", &result);
+ if (result == SDP_SUCCESS) {
+ /* Make sure the version is numeric, even though we store it as
+ * a string.
+ */
+ result = sdp_verify_unsigned(sdp_p->owner_version, max_value_version);
+ }
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s Invalid owner version specified for o=.",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ /* Find the owner network type. */
+ ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), " \t", &result);
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s No owner network type specified for o=.",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+ /* Match the token against the supported network-type name table;
+ * a recognized-but-disabled type stays SDP_NT_UNSUPPORTED. */
+ sdp_p->owner_network_type = SDP_NT_UNSUPPORTED;
+ for (i=0; i < SDP_MAX_NETWORK_TYPES; i++) {
+ if (cpr_strncasecmp(tmp, sdp_nettype[i].name,
+ sdp_nettype[i].strlen) == 0) {
+ if (sdp_p->conf_p->nettype_supported[i] == TRUE) {
+ sdp_p->owner_network_type = (sdp_nettype_e)i;
+ }
+ }
+ }
+ if (sdp_p->owner_network_type == SDP_NT_UNSUPPORTED) {
+ sdp_parse_error(sdp_p,
+ "%s Owner network type unsupported (%s)",
+ sdp_p->debug_str, tmp);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ /* Find the owner address type. */
+ ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), " \t", &result);
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s No owner address type specified for o=.",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+ sdp_p->owner_addr_type = SDP_AT_UNSUPPORTED;
+ for (i=0; i < SDP_MAX_ADDR_TYPES; i++) {
+ if (cpr_strncasecmp(tmp, sdp_addrtype[i].name,
+ sdp_addrtype[i].strlen) == 0) {
+ if (sdp_p->conf_p->addrtype_supported[i] == TRUE) {
+ sdp_p->owner_addr_type = (sdp_addrtype_e)i;
+ }
+ }
+ }
+ /* ATM sessions are allowed to omit a usable address type. */
+ if ((sdp_p->owner_addr_type == SDP_AT_UNSUPPORTED) &&
+ (sdp_p->owner_network_type != SDP_NT_ATM)) {
+ sdp_parse_error(sdp_p,
+ "%s Owner address type unsupported (%s)",
+ sdp_p->debug_str, tmp);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ /* Find the owner address. */
+ ptr = sdp_getnextstrtok(ptr, sdp_p->owner_addr, sizeof(sdp_p->owner_addr), " \t", &result);
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s No owner address specified.", sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Parse owner: name %s, session id %s, version %s",
+ sdp_p->debug_str, sdp_p->owner_name, sdp_p->owner_sessid,
+ sdp_p->owner_version);
+ SDP_PRINT("%s network %s, address type %s, "
+ "address %s", sdp_p->debug_str,
+ sdp_get_network_name(sdp_p->owner_network_type),
+ sdp_get_address_name(sdp_p->owner_addr_type),
+ sdp_p->owner_addr);
+ }
+ return (SDP_SUCCESS);
+}
+
+/* Build the "o=" line. With complete owner data the full six-field form
+ * is emitted. With incomplete data, an ATM session with an invalid
+ * address type gets a reduced "o=<name> <sessid> <version> <net> - -"
+ * form; otherwise the line is either an error (owner_reqd) or skipped. */
+sdp_result_e sdp_build_owner (sdp_t *sdp_p, uint16_t level, flex_string *fs)
+{
+ if ((sdp_p->owner_name[0] == '\0') ||
+ (sdp_p->owner_network_type >= SDP_MAX_NETWORK_TYPES) ||
+ (sdp_p->owner_addr_type >= SDP_MAX_ADDR_TYPES) ||
+ (sdp_p->owner_addr[0] == '\0')) {
+
+ /* NOTE(review): this ATM branch emits the line but then still
+ * falls through to the owner_reqd check below, which may return
+ * SDP_INVALID_PARAMETER after writing output -- looks intentional
+ * legacy behavior but worth confirming. */
+ if((sdp_p->owner_network_type == SDP_NT_ATM) &&
+ (sdp_p->owner_addr_type == SDP_AT_INVALID)) {
+ flex_string_sprintf(fs, "o=%s %s %s %s - -\r\n",
+ sdp_p->owner_name, sdp_p->owner_sessid,
+ sdp_p->owner_version,
+ sdp_get_network_name(sdp_p->owner_network_type));
+ }
+
+ if (sdp_p->conf_p->owner_reqd == TRUE) {
+ CSFLogError(logTag, "%s Invalid params for o= owner line, "
+ "build failed.", sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ } else {
+ /* o= line is not required. */
+ return (SDP_SUCCESS);
+ }
+ }
+
+ flex_string_sprintf(fs, "o=%s %s %s %s %s %s\r\n",
+ sdp_p->owner_name, sdp_p->owner_sessid,
+ sdp_p->owner_version,
+ sdp_get_network_name(sdp_p->owner_network_type),
+ sdp_get_address_name(sdp_p->owner_addr_type),
+ sdp_p->owner_addr);
+
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Built o= owner line", sdp_p->debug_str);
+ }
+ return (SDP_SUCCESS);
+}
+
+/* Parse the "s=" line. The name (up to SDP_MAX_STRING_LEN bytes, longer
+ * input silently truncated) is copied into sdp_p->sessname. An empty or
+ * duplicate s= line is only a warning, never a failure. */
+sdp_result_e sdp_parse_sessname (sdp_t *sdp_p, uint16_t level, const char *ptr)
+{
+ int str_len;
+ char *endptr;
+
+ /* Duplicate s= lines are tolerated; the later one overwrites. */
+ if (sdp_p->sessname[0] != '\0') {
+ sdp_p->conf_p->num_invalid_token_order++;
+ sdp_parse_error(sdp_p,
+ "%s Warning: More than one s= line specified.",
+ sdp_p->debug_str);
+ }
+
+ endptr = sdp_findchar(ptr, "\r\n");
+ if (ptr == endptr) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: No session name specified.",
+ sdp_p->debug_str);
+ }
+ /* assumes sessname holds SDP_MAX_STRING_LEN+1 bytes (str_len+1 is
+ * passed to sstrncpy) -- TODO confirm against the sdp_t layout. */
+ str_len = MIN(endptr - ptr, SDP_MAX_STRING_LEN);
+ sstrncpy(sdp_p->sessname, ptr, str_len+1);
+
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Parse session name, %s",
+ sdp_p->debug_str, sdp_p->sessname);
+ }
+ return (SDP_SUCCESS);
+}
+
+/* Build the "s=" line from the stored session name; an empty name is an
+ * error only when the configuration requires the line. */
+sdp_result_e sdp_build_sessname (sdp_t *sdp_p, uint16_t level, flex_string *fs)
+{
+    if (sdp_p->sessname[0] == '\0') {
+        if (sdp_p->conf_p->session_name_reqd != TRUE) {
+            /* s= line is not required. */
+            return (SDP_SUCCESS);
+        }
+        CSFLogError(logTag, "%s No param defined for s= session name line, "
+                    "build failed.", sdp_p->debug_str);
+        sdp_p->conf_p->num_invalid_param++;
+        return (SDP_INVALID_PARAMETER);
+    }
+
+    flex_string_sprintf(fs, "s=%s\r\n", sdp_p->sessname);
+
+    if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+        SDP_PRINT("%s Built s= session name line", sdp_p->debug_str);
+    }
+    return (SDP_SUCCESS);
+}
+
+/* We don't want to store the session info, but we do want to validate
+ * that at most one i= line exists at each level and if the line exists
+ * there should be a parameter.
+ */
+sdp_result_e sdp_parse_sessinfo (sdp_t *sdp_p, uint16_t level, const char *ptr)
+{
+ char *endptr;
+ sdp_mca_t *mca_p;
+
+ /* i= may appear once at session level and once per media level;
+ * track the "seen" flag on whichever scope this line belongs to. */
+ if (level == SDP_SESSION_LEVEL) {
+ if (sdp_p->sessinfo_found == TRUE) {
+ sdp_p->conf_p->num_invalid_token_order++;
+ sdp_parse_error(sdp_p,
+ "%s Warning: More than one i= line specified.",
+ sdp_p->debug_str);
+ }
+ sdp_p->sessinfo_found = TRUE;
+ } else {
+ mca_p = sdp_find_media_level(sdp_p, level);
+ if (mca_p == NULL) {
+ return (SDP_FAILURE);
+ }
+ if (mca_p->sessinfo_found == TRUE) {
+ sdp_p->conf_p->num_invalid_token_order++;
+ sdp_parse_error(sdp_p,
+ "%s Warning: More than one i= line specified"
+ " for media line %u.", sdp_p->debug_str, (unsigned)level);
+ }
+ mca_p->sessinfo_found = TRUE;
+ }
+
+ /* Empty i= content is only a warning; the text is not stored. */
+ endptr = sdp_findchar(ptr, "\n");
+ if (ptr == endptr) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: No session info specified.",
+ sdp_p->debug_str);
+ }
+
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Parsed session info line.", sdp_p->debug_str);
+ }
+ return (SDP_SUCCESS);
+}
+
+sdp_result_e sdp_build_sessinfo (sdp_t *sdp_p, uint16_t level, flex_string *fs)
+{
+    /* Emitting i= session info lines is not supported; report success
+     * so the rest of the SDP is still built. */
+    return SDP_SUCCESS;
+}
+
+/* Parse the "u=" line. The URI text is not stored; we only remember
+ * that a u= line was seen (warning on duplicates) and warn when it is
+ * empty. Always succeeds. */
+sdp_result_e sdp_parse_uri (sdp_t *sdp_p, uint16_t level, const char *ptr)
+{
+    const char *line_end;
+
+    if (sdp_p->uri_found == TRUE) {
+        sdp_p->conf_p->num_invalid_token_order++;
+        sdp_parse_error(sdp_p,
+                        "%s Warning: More than one u= line specified.",
+                        sdp_p->debug_str);
+    }
+    sdp_p->uri_found = TRUE;
+
+    line_end = sdp_findchar(ptr, "\n");
+    if (line_end == ptr) {
+        sdp_parse_error(sdp_p,
+                        "%s Warning: No URI info specified.", sdp_p->debug_str);
+    }
+
+    if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+        SDP_PRINT("%s Parsed URI line.", sdp_p->debug_str);
+    }
+    return SDP_SUCCESS;
+}
+
+sdp_result_e sdp_build_uri (sdp_t *sdp_p, uint16_t level, flex_string *fs)
+{
+    /* Emitting u= URI lines is not supported; succeed without output. */
+    return SDP_SUCCESS;
+}
+
+/* Parse the "e=" line. The email text is not stored; an empty line only
+ * produces a warning. Always succeeds. */
+sdp_result_e sdp_parse_email (sdp_t *sdp_p, uint16_t level, const char *ptr)
+{
+    const char *line_end = sdp_findchar(ptr, "\n");
+
+    if (line_end == ptr) {
+        sdp_parse_error(sdp_p,
+                        "%s Warning: No email info specified.", sdp_p->debug_str);
+    }
+
+    if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+        SDP_PRINT("%s Parse email line", sdp_p->debug_str);
+    }
+    return SDP_SUCCESS;
+}
+
+sdp_result_e sdp_build_email (sdp_t *sdp_p, uint16_t level, flex_string *fs)
+{
+ /* Build email line not supported. */
+ return (SDP_SUCCESS);
+}
+
+sdp_result_e sdp_parse_phonenum (sdp_t *sdp_p, uint16_t level, const char *ptr)
+{
+ char *endptr;
+
+ endptr = sdp_findchar(ptr, "\n");
+ if (ptr == endptr) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: No phone number info specified.",
+ sdp_p->debug_str);
+ }
+
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Parse phone number line", sdp_p->debug_str);
+ }
+ return (SDP_SUCCESS);
+}
+
+sdp_result_e sdp_build_phonenum (sdp_t *sdp_p, uint16_t level, flex_string *fs)
+{
+ /* Build phone number line not supported. */
+ return (SDP_SUCCESS);
+}
+
+sdp_result_e sdp_parse_connection (sdp_t *sdp_p, uint16_t level, const char *ptr)
+{
+ int i;
+ const char *slash_ptr;
+ sdp_result_e result;
+ sdp_conn_t *conn_p;
+ sdp_mca_t *mca_p;
+ char tmp[SDP_MAX_STRING_LEN];
+ char mcast_str[MCAST_STRING_LEN];
+ int mcast_bits;
+ unsigned long strtoul_result;
+ char *strtoul_end;
+
+ if (level == SDP_SESSION_LEVEL) {
+ conn_p = &(sdp_p->default_conn);
+ } else {
+ mca_p = sdp_find_media_level(sdp_p, level);
+ if (mca_p == NULL) {
+ return (SDP_FAILURE);
+ }
+ conn_p = &(mca_p->conn);
+ }
+
+ /* See if the c= line is already defined at this level. We don't
+ * currently support multihoming and so we only support one c= at
+ * each level.
+ */
+ if (conn_p->nettype != SDP_NT_INVALID) {
+ sdp_p->conf_p->num_invalid_token_order++;
+ sdp_parse_error(sdp_p,
+ "%s c= line specified twice at same level, "
+ "parse failed.", sdp_p->debug_str);
+ return (SDP_INVALID_TOKEN_ORDERING);
+ }
+
+ /* Find the connection network type. */
+ ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), " \t", &result);
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s No connection network type specified for c=.",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+ conn_p->nettype = SDP_NT_UNSUPPORTED;
+ for (i=0; i < SDP_MAX_NETWORK_TYPES; i++) {
+ if (cpr_strncasecmp(tmp, sdp_nettype[i].name,
+ sdp_nettype[i].strlen) == 0) {
+ if (sdp_p->conf_p->nettype_supported[i] == TRUE) {
+ conn_p->nettype = (sdp_nettype_e)i;
+ }
+ }
+ }
+ if (conn_p->nettype == SDP_NT_UNSUPPORTED) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: Connection network type unsupported "
+ "(%s) for c=.", sdp_p->debug_str, tmp);
+ }
+
+ /* Find the connection address type. */
+ ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), " \t", &result);
+ if (result != SDP_SUCCESS) {
+ if (conn_p->nettype == SDP_NT_ATM) {
+ /* If the nettype is ATM, addr type and addr are not reqd */
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Parse connection: network %s", sdp_p->debug_str,
+ sdp_get_network_name(conn_p->nettype));
+ }
+ return (SDP_SUCCESS);
+ } else {
+ sdp_parse_error(sdp_p,
+ "%s No connection address type specified for "
+ "c=.", sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+ }
+ conn_p->addrtype = SDP_AT_UNSUPPORTED;
+ for (i=0; i < SDP_MAX_ADDR_TYPES; i++) {
+ if (cpr_strncasecmp(tmp, sdp_addrtype[i].name,
+ sdp_addrtype[i].strlen) == 0) {
+ if (sdp_p->conf_p->addrtype_supported[i] == TRUE) {
+ conn_p->addrtype = (sdp_addrtype_e)i;
+ }
+ }
+ }
+ if (conn_p->addrtype == SDP_AT_UNSUPPORTED) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: Connection address type unsupported "
+ "(%s) for c=.", sdp_p->debug_str, tmp);
+ }
+
+ /* Find the connection address. */
+ ptr = sdp_getnextstrtok(ptr, conn_p->conn_addr, sizeof(conn_p->conn_addr), " \t", &result);
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s No connection address specified for c=.",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+ /* We currently only support addrs containing '/'s for EPN addrs.
+ * For other addrs this would indicate multicast addrs. */
+ /* Multicast host group addresses are defined to be the IP addresses
+ * whose high-order four bits are 1110, giving an address range from
+ * 224.0.0.0 through 239.255.255.255
+ */
+ /* multicast addr check */
+ sstrncpy (mcast_str, conn_p->conn_addr, MCAST_STRING_LEN);
+
+ if (conn_p->addrtype == SDP_AT_IP4) {
+ errno = 0;
+ strtoul_result = strtoul(mcast_str, &strtoul_end, 10);
+
+ if (errno || mcast_str == strtoul_end || strtoul_result > 255) {
+ sdp_parse_error(sdp_p,
+ "%s Error parsing address %s for mcast.",
+ sdp_p->debug_str, mcast_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return SDP_INVALID_PARAMETER;
+ }
+
+
+ mcast_bits = (int) strtoul_result;
+ if ((mcast_bits >= SDP_MIN_MCAST_ADDR_HI_BIT_VAL ) &&
+ (mcast_bits <= SDP_MAX_MCAST_ADDR_HI_BIT_VAL)) {
+ SDP_PRINT("%s Parsed to be a multicast address with mcast bits %d",
+ sdp_p->debug_str, mcast_bits);
+ conn_p->is_multicast = TRUE;
+ }
+ }
+
+ if (conn_p->addrtype != SDP_AT_EPN) {
+ slash_ptr = sdp_findchar(conn_p->conn_addr, "/");
+ if (slash_ptr[0] != '\0') {
+ /* this used to rely on the above busted multicast check */
+ SDP_PRINT("%s An address with slash %s",
+ sdp_p->debug_str, conn_p->conn_addr);
+ conn_p->conn_addr[slash_ptr - conn_p->conn_addr] = '\0';
+ slash_ptr++;
+ slash_ptr = sdp_getnextstrtok(slash_ptr, tmp, sizeof(tmp),
+ "/", &result);
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s No ttl value specified for this multicast addr with a slash",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ errno = 0;
+ strtoul_result = strtoul(tmp, &strtoul_end, 10);
+
+ if (errno || tmp == strtoul_end || conn_p->ttl > SDP_MAX_TTL_VALUE) {
+ sdp_parse_error(sdp_p,
+ "%s Invalid TTL: Value must be in the range 0-255 ",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ conn_p->ttl = (int) strtoul_result;
+
+ /* search for num of addresses */
+ /*sa_ignore NO_NULL_CHK
+ {ptr is valid since the pointer was checked earlier and the
+ function would have exited if NULL.}*/
+ slash_ptr = sdp_findchar(slash_ptr, "/");
+ if (slash_ptr != NULL &&
+ slash_ptr[0] != '\0') {
+ SDP_PRINT("%s Found a num addr field for multicast addr %s ",
+ sdp_p->debug_str,slash_ptr);
+ slash_ptr++;
+
+ errno = 0;
+ strtoul_result = strtoul(slash_ptr, &strtoul_end, 10);
+
+ if (errno || slash_ptr == strtoul_end || strtoul_result == 0) {
+ sdp_parse_error(sdp_p,
+ "%s Invalid Num of addresses: Value must be > 0 ",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return SDP_INVALID_PARAMETER;
+ }
+
+ conn_p->num_of_addresses = (int) strtoul_result;
+ }
+ }
+ }
+
+ /* See if the address is the choose param and if it's allowed. */
+ if ((sdp_p->conf_p->allow_choose[SDP_CHOOSE_CONN_ADDR] == FALSE) &&
+ (strcmp(conn_p->conn_addr, "$") == 0)) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: Choose parameter for connection "
+ "address specified but not allowed.", sdp_p->debug_str);
+ }
+
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Parse connection: network %s, address type %s, "
+ "address %s ttl= %u num of addresses = %u",
+ sdp_p->debug_str,
+ sdp_get_network_name(conn_p->nettype),
+ sdp_get_address_name(conn_p->addrtype),
+ conn_p->conn_addr, (unsigned)conn_p->ttl, (unsigned)conn_p->num_of_addresses);
+ }
+ return (SDP_SUCCESS);
+}
+
+sdp_result_e sdp_build_connection (sdp_t *sdp_p, uint16_t level, flex_string *fs)
+{
+ sdp_mca_t *mca_p;
+ sdp_conn_t *conn_p;
+
+ if (level == SDP_SESSION_LEVEL) {
+ conn_p = &(sdp_p->default_conn);
+ } else {
+ mca_p = sdp_find_media_level(sdp_p, level);
+ if (mca_p == NULL) {
+ return (SDP_FAILURE);
+ }
+ conn_p = &(mca_p->conn);
+ }
+
+ if((conn_p->nettype == SDP_NT_ATM ) &&
+ (conn_p->addrtype == SDP_AT_INVALID)) {
+ /*allow c= line to be built without address type and address fields
+ * This is a special case for ATM PVC*/
+ flex_string_sprintf(fs, "c=%s\r\n",
+ sdp_get_network_name(conn_p->nettype));
+ return SDP_SUCCESS;
+ }
+ if ((conn_p->nettype >= SDP_MAX_NETWORK_TYPES) ||
+ (conn_p->addrtype >= SDP_MAX_ADDR_TYPES) ||
+ (conn_p->conn_addr[0] == '\0')) {
+ /* Connection info isn't set - don't need to build the token. */
+ return (SDP_SUCCESS);
+ }
+
+ if (conn_p->is_multicast) {
+ if (conn_p->num_of_addresses > 1) {
+ flex_string_sprintf(fs, "c=%s %s %s/%u/%u\r\n",
+ sdp_get_network_name(conn_p->nettype),
+ sdp_get_address_name(conn_p->addrtype),
+ conn_p->conn_addr,
+ (unsigned)conn_p->ttl,
+ (unsigned)conn_p->num_of_addresses);
+ } else {
+ flex_string_sprintf(fs, "c=%s %s %s/%u\r\n",
+ sdp_get_network_name(conn_p->nettype),
+ sdp_get_address_name(conn_p->addrtype),
+ conn_p->conn_addr,
+ (unsigned)conn_p->ttl);
+ }
+ } else {
+
+ flex_string_sprintf(fs, "c=%s %s %s\r\n",
+ sdp_get_network_name(conn_p->nettype),
+ sdp_get_address_name(conn_p->addrtype),
+ conn_p->conn_addr);
+ }
+
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Built c= connection line", sdp_p->debug_str);
+ }
+ return (SDP_SUCCESS);
+}
+
+/*
+ * sdp_parse_bandwidth
+ *
+ * This function parses a bandwidth field. The parsing is done in accordance
+ * to the following ABNF:
+ *
+ * bandwidth-fields = *("b=" bwtype ":" bandwidth CRLF)
+ * bwtype = 1*(alpha-numeric)
+ * bandwidth = 1*(DIGIT)
+ *
+ * It currently supports three types of valid bwtypes - AS, CT and TIAS
+ */
+sdp_result_e sdp_parse_bandwidth (sdp_t *sdp_p, uint16_t level, const char *ptr)
+{
+ int i;
+ sdp_mca_t *mca_p;
+ sdp_bw_t *bw_p;
+ sdp_bw_data_t *bw_data_p;
+ sdp_bw_data_t *new_bw_data_p;
+ sdp_result_e result;
+ char tmp[SDP_MAX_STRING_LEN];
+ sdp_bw_modifier_e bw_modifier = SDP_BW_MODIFIER_UNSUPPORTED;
+ int bw_val = 0;
+
+ if (level == SDP_SESSION_LEVEL) {
+ bw_p = &(sdp_p->bw);
+ } else {
+ mca_p = sdp_find_media_level(sdp_p, level);
+ if (mca_p == NULL) {
+ return (SDP_FAILURE);
+ }
+ bw_p = &(mca_p->bw);
+ }
+
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Parse bandwidth line", sdp_p->debug_str);
+ }
+
+ /* Find the bw type (AS, CT or TIAS) */
+ ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), ":", &result);
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s No bandwidth type specified for b= ",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+ for (i=0; i < SDP_MAX_BW_MODIFIER_VAL; i++) {
+ if (cpr_strncasecmp(tmp, sdp_bw_modifier_val[i].name,
+ sdp_bw_modifier_val[i].strlen) == 0) {
+ bw_modifier = (sdp_bw_modifier_e)i;
+ break;
+ }
+ }
+
+ if (bw_modifier == SDP_BW_MODIFIER_UNSUPPORTED) {
+ /* We don't understand this parameter, so according to RFC4566 sec 5.8
+ * ignore it. */
+ return (SDP_SUCCESS);
+ }
+
+ /* Find the BW type value */
+ /*sa_ignore NO_NULL_CHK
+ {ptr is valid since the pointer was checked earlier and the
+ function would have exited if NULL.}*/
+ if (*ptr == ':') {
+ ptr++;
+ bw_val = sdp_getnextnumtok(ptr, &ptr, " \t", &result);
+ if ((result != SDP_SUCCESS)) {
+ sdp_parse_error(sdp_p,
+ "%s Error: No BW Value specified ",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ }
+ }
+
+ /*
+ * Allocate a new sdp_bw_data_t instance and set it's values from the
+ * input parameters.
+ */
+ new_bw_data_p = (sdp_bw_data_t*)SDP_MALLOC(sizeof(sdp_bw_data_t));
+ if (new_bw_data_p == NULL) {
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_NO_RESOURCE);
+ }
+ new_bw_data_p->next_p = NULL;
+ new_bw_data_p->bw_modifier = bw_modifier;
+ new_bw_data_p->bw_val = bw_val;
+
+ /*
+ * Enqueue the sdp_bw_data_t instance at the end of the list of
+ * sdp_bw_data_t instances.
+ */
+ if (bw_p->bw_data_list == NULL) {
+ bw_p->bw_data_list = new_bw_data_p;
+ } else {
+ for (bw_data_p = bw_p->bw_data_list;
+ bw_data_p->next_p != NULL;
+ bw_data_p = bw_data_p->next_p) {
+ ; // Empty For
+ }
+ bw_data_p->next_p = new_bw_data_p;
+ }
+ bw_p->bw_data_count++;
+
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Parsed bw type %s, value %d", sdp_p->debug_str,
+ sdp_get_bw_modifier_name(new_bw_data_p->bw_modifier),
+ new_bw_data_p->bw_val);
+ }
+
+ return (SDP_SUCCESS);
+}
+
+/*
+ * sdp_build_bandwidth
+ *
+ * Builds *all* the bandwith lines for the specified level.
+ */
+sdp_result_e sdp_build_bandwidth (sdp_t *sdp_p, uint16_t level, flex_string *fs)
+{
+ sdp_bw_t *bw_p;
+ sdp_bw_data_t *bw_data_p;
+ sdp_mca_t *mca_p;
+
+ if (level == SDP_SESSION_LEVEL) {
+ bw_p = &(sdp_p->bw);
+ } else {
+ mca_p = sdp_find_media_level(sdp_p, level);
+ if (mca_p == NULL) {
+ return (SDP_FAILURE);
+ }
+ bw_p = &(mca_p->bw);
+ }
+
+ bw_data_p = bw_p->bw_data_list;
+ while (bw_data_p) {
+ flex_string_sprintf(fs, "b=%s:%d\r\n",
+ sdp_get_bw_modifier_name(bw_data_p->bw_modifier),
+ bw_data_p->bw_val);
+
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Built b=%s:%d bandwidth line", sdp_p->debug_str,
+ sdp_get_bw_modifier_name(bw_data_p->bw_modifier),
+ bw_data_p->bw_val);
+ }
+
+ bw_data_p = bw_data_p->next_p;
+ }
+
+ return (SDP_SUCCESS);
+}
+
+sdp_result_e sdp_parse_timespec (sdp_t *sdp_p, uint16_t level, const char *ptr)
+{
+ char *tmpptr;
+ sdp_result_e result;
+ sdp_timespec_t *timespec_p;
+ sdp_timespec_t *next_timespec_p;
+
+ timespec_p = (sdp_timespec_t *)SDP_MALLOC(sizeof(sdp_timespec_t));
+ if (timespec_p == NULL) {
+ sdp_p->conf_p->num_no_resource++;
+ return (SDP_NO_RESOURCE);
+ }
+
+ /* Validate start and stop times. */
+ ptr = sdp_getnextstrtok(ptr, timespec_p->start_time, sizeof(timespec_p->start_time), " \t", &result);
+ if (result == SDP_SUCCESS) {
+ /* Make sure the start_time is numeric, even though we store it as
+ * a string.
+ */
+ (void)sdp_getnextnumtok(timespec_p->start_time,
+ (const char **)&tmpptr, " \t", &result);
+ }
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s Invalid timespec start time specified.",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ SDP_FREE(timespec_p);
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ ptr = sdp_getnextstrtok(ptr, timespec_p->stop_time, sizeof(timespec_p->stop_time), " \t", &result);
+ if (result == SDP_SUCCESS) {
+ /* Make sure the start_time is numeric, even though we store it as
+ * a string.
+ */
+ (void)sdp_getnextnumtok(timespec_p->stop_time,
+ (const char **)&tmpptr, " \t", &result);
+ }
+ if (result != SDP_SUCCESS) {
+ sdp_parse_error(sdp_p,
+ "%s Invalid timespec stop time specified.",
+ sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ SDP_FREE(timespec_p);
+ return (SDP_INVALID_PARAMETER);
+ }
+
+ /* Link the new timespec in to the end of the list. */
+ if (sdp_p->timespec_p == NULL) {
+ sdp_p->timespec_p = timespec_p;
+ } else {
+ next_timespec_p = sdp_p->timespec_p;
+ while (next_timespec_p->next_p != NULL) {
+ next_timespec_p = next_timespec_p->next_p;
+ }
+ next_timespec_p->next_p = timespec_p;
+ }
+
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Parsed timespec line", sdp_p->debug_str);
+ }
+ return (SDP_SUCCESS);
+}
+
+sdp_result_e sdp_build_timespec (sdp_t *sdp_p, uint16_t level, flex_string *fs)
+{
+ if ((sdp_p->timespec_p == NULL) ||
+ (sdp_p->timespec_p->start_time[0] == '\0') ||
+ (sdp_p->timespec_p->stop_time[0] == '\0')) {
+ if (sdp_p->conf_p->timespec_reqd == TRUE) {
+ CSFLogError(logTag, "%s Invalid params for t= time spec line, "
+ "build failed.", sdp_p->debug_str);
+ sdp_p->conf_p->num_invalid_param++;
+ return (SDP_INVALID_PARAMETER);
+ } else {
+ /* t= line not required. */
+ return (SDP_SUCCESS);
+ }
+ }
+
+ /* Note: We only support one t= line currently. */
+ flex_string_sprintf(fs, "t=%s %s\r\n", sdp_p->timespec_p->start_time,
+ sdp_p->timespec_p->stop_time);
+
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Built t= timespec line", sdp_p->debug_str);
+ }
+ return (SDP_SUCCESS);
+}
+
+sdp_result_e sdp_parse_repeat_time (sdp_t *sdp_p, uint16_t level, const char *ptr)
+{
+ char *endptr;
+
+ endptr = sdp_findchar(ptr, "\n");
+ if (ptr == endptr) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: No repeat time parameters "
+ "specified.", sdp_p->debug_str);
+ }
+
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Parsed repeat time line", sdp_p->debug_str);
+ }
+ return (SDP_SUCCESS);
+}
+
+sdp_result_e sdp_build_repeat_time (sdp_t *sdp_p, uint16_t level, flex_string *fs)
+{
+ /* Build repeat time line not supported. */
+ return (SDP_SUCCESS);
+}
+
+sdp_result_e sdp_parse_timezone_adj (sdp_t *sdp_p, uint16_t level, const char *ptr)
+{
+ char *endptr;
+
+ endptr = sdp_findchar(ptr, "\n");
+ if (ptr == endptr) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: No timezone parameters specified.",
+ sdp_p->debug_str);
+ }
+
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Parse timezone adustment line", sdp_p->debug_str);
+ }
+ return (SDP_SUCCESS);
+}
+
+sdp_result_e sdp_build_timezone_adj (sdp_t *sdp_p, uint16_t level, flex_string *fs)
+{
+ /* Build timezone adjustment line not supported. */
+ return (SDP_SUCCESS);
+}
+
/*
 * sdp_parse_encryption
 *
 * Parses a k= (encryption key) line of the form k=<method>[:<key>] at
 * the session level or for the media line given by 'level'.  An
 * unrecognized method is kept as SDP_ENCRYPT_UNSUPPORTED with only a
 * warning.  A missing key is an error only for the clear/base64/uri
 * methods, which require one; "prompt" never carries a key.
 */
sdp_result_e sdp_parse_encryption (sdp_t *sdp_p, uint16_t level, const char *ptr)
{
    int i;
    sdp_result_e result;
    sdp_encryptspec_t *encrypt_p;
    sdp_mca_t *mca_p;
    char tmp[SDP_MAX_STRING_LEN];

    /* Select the session-level or media-level encryption spec. */
    if (level == SDP_SESSION_LEVEL) {
        encrypt_p = &(sdp_p->encrypt);
    } else {
        mca_p = sdp_find_media_level(sdp_p, level);
        if (mca_p == NULL) {
            return (SDP_FAILURE);
        }
        encrypt_p = &(mca_p->encrypt);
    }
    encrypt_p->encrypt_key[0] = '\0';

    /* Find the encryption type. */
    ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), ":", &result);
    if (result != SDP_SUCCESS) {
        sdp_parse_error(sdp_p,
            "%s No encryption type specified for k=.",
            sdp_p->debug_str);
        sdp_p->conf_p->num_invalid_param++;
        return (SDP_INVALID_PARAMETER);
    }
    encrypt_p->encrypt_type = SDP_ENCRYPT_UNSUPPORTED;
    for (i=0; i < SDP_MAX_ENCRYPT_TYPES; i++) {
        if (cpr_strncasecmp(tmp, sdp_encrypt[i].name,
                        sdp_encrypt[i].strlen) == 0) {
            encrypt_p->encrypt_type = (sdp_encrypt_type_e)i;
            break;
        }
    }
    if (encrypt_p->encrypt_type == SDP_ENCRYPT_UNSUPPORTED) {
        /* Unknown method draws a warning only; parsing continues. */
        sdp_parse_error(sdp_p,
            "%s Warning: Encryption type unsupported (%s).",
            sdp_p->debug_str, tmp);
    }

    /* Find the encryption key. */
    encrypt_p->encrypt_key[0] = '\0';
    /*sa_ignore NO_NULL_CHK
      {ptr is valid since the pointer was checked earlier and the
       function would have exited if NULL.}*/
    if (*ptr == ':')
        ptr++;
    if (encrypt_p->encrypt_type != SDP_ENCRYPT_PROMPT) {
        ptr = sdp_getnextstrtok(ptr, encrypt_p->encrypt_key, sizeof(encrypt_p->encrypt_key), " \t", &result);
        /* Only the methods below require a key; others may omit it. */
        if ((result != SDP_SUCCESS) &&
            ((encrypt_p->encrypt_type == SDP_ENCRYPT_CLEAR) ||
             (encrypt_p->encrypt_type == SDP_ENCRYPT_BASE64) ||
             (encrypt_p->encrypt_type == SDP_ENCRYPT_URI))) {
            sdp_parse_error(sdp_p,
                "%s Warning: No encryption key specified "
                "as required.", sdp_p->debug_str);
            sdp_p->conf_p->num_invalid_param++;
            return (SDP_INVALID_PARAMETER);
        }
    }

    if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
        SDP_PRINT("%s Parse encryption type %s, key %s", sdp_p->debug_str,
                  sdp_get_encrypt_name(encrypt_p->encrypt_type),
                  encrypt_p->encrypt_key);
    }
    return (SDP_SUCCESS);
}
+
+/* If the encryption info is valid, we build it. Else skip it. */
+sdp_result_e sdp_build_encryption (sdp_t *sdp_p, uint16_t level, flex_string *fs)
+{
+ sdp_encryptspec_t *encrypt_p;
+ sdp_mca_t *mca_p;
+
+ if (level == SDP_SESSION_LEVEL) {
+ encrypt_p = &(sdp_p->encrypt);
+ } else {
+ mca_p = sdp_find_media_level(sdp_p, level);
+ if (mca_p == NULL) {
+ return (SDP_FAILURE);
+ }
+ encrypt_p = &(mca_p->encrypt);
+ }
+
+ if ((encrypt_p->encrypt_type >= SDP_MAX_ENCRYPT_TYPES) ||
+ ((encrypt_p->encrypt_type != SDP_ENCRYPT_PROMPT) &&
+ (encrypt_p->encrypt_key[0] == '\0'))) {
+ /* Encryption info isn't set - don't need to build the token. */
+ return (SDP_SUCCESS);
+ }
+
+ flex_string_sprintf(fs, "k=%s",
+ sdp_get_encrypt_name(encrypt_p->encrypt_type));
+
+ if (encrypt_p->encrypt_type == SDP_ENCRYPT_PROMPT) {
+ /* There is no key to print. */
+ flex_string_sprintf(fs, "\r\n");
+ } else {
+ flex_string_sprintf(fs, ":%s\r\n", encrypt_p->encrypt_key);
+ }
+
+ if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
+ SDP_PRINT("%s Built k= encryption line", sdp_p->debug_str);
+ }
+ return (SDP_SUCCESS);
+}
+
/*
 * sdp_parse_media
 *
 * Parses an m= (media description) line and appends a new media-level
 * structure (sdp_mca_t) to the SDP.  The line has the general form
 *
 *     m=<media> <port-spec> <transport> <payloads...>
 *
 * The port token is tokenized first (up to SDP_MAX_PORT_PARAMS
 * '/'-separated numbers or '$' choose tokens) but is interpreted only
 * after the transport protocol is known, because the valid port
 * formats depend on the transport (plain port, port/count, vpi/vci,
 * vcci/cid, etc.).  AAL2 transports allow multiple profile/payload
 * groups per line and DTLS/SCTP carries an sctp port instead of RTP
 * payload types; all other transports parse payloads normally.
 *
 * Returns SDP_SUCCESS, SDP_NO_RESOURCE (allocation failure), or
 * SDP_INVALID_PARAMETER.
 */
sdp_result_e sdp_parse_media (sdp_t *sdp_p, uint16_t level, const char *ptr)
{
    uint16_t i;
    uint16_t num_port_params=0;
    int32_t num[SDP_MAX_PORT_PARAMS];
    tinybool valid_param = FALSE;
    sdp_result_e result;
    sdp_mca_t *mca_p;
    sdp_mca_t *next_mca_p;
    char tmp[SDP_MAX_STRING_LEN];
    char port[SDP_MAX_STRING_LEN];
    const char *port_ptr;
    int32_t sctp_port;

    /* Allocate resource for new media stream. */
    mca_p = sdp_alloc_mca(sdp_p->parse_line);
    if (mca_p == NULL) {
        sdp_p->conf_p->num_no_resource++;
        return (SDP_NO_RESOURCE);
    }

    /* Find the media type. */
    ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), " \t", &result);
    if (result != SDP_SUCCESS) {
        sdp_parse_error(sdp_p,
            "%s No media type specified, parse failed.",
            sdp_p->debug_str);
        SDP_FREE(mca_p);
        sdp_p->conf_p->num_invalid_param++;
        return (SDP_INVALID_PARAMETER);
    }
    mca_p->media = SDP_MEDIA_UNSUPPORTED;
    for (i=0; i < SDP_MAX_MEDIA_TYPES; i++) {
        if (cpr_strncasecmp(tmp, sdp_media[i].name,
                        sdp_media[i].strlen) == 0) {
            mca_p->media = (sdp_media_e)i;
        }
    }
    if (mca_p->media == SDP_MEDIA_UNSUPPORTED) {
        /* Unknown media type draws a warning only; parsing continues. */
        sdp_parse_error(sdp_p,
            "%s Warning: Media type unsupported (%s).",
            sdp_p->debug_str, tmp);
    }

    /* Find the port token parameters, but don't process it until
     * we determine the transport protocol as that determines what
     * port number formats are valid.
     */
    ptr = sdp_getnextstrtok(ptr, port, sizeof(port), " \t", &result);
    if (result != SDP_SUCCESS) {
        sdp_parse_error(sdp_p,
            "%s No port specified in m= media line, "
            "parse failed.", sdp_p->debug_str);
        SDP_FREE(mca_p);
        sdp_p->conf_p->num_invalid_param++;
        return (SDP_INVALID_PARAMETER);
    }
    /* Tokenize the '/'-separated port fields; each may be a number or
     * the '$' choose token (stored as SDP_CHOOSE_PARAM). */
    port_ptr = port;
    for (i=0; i < SDP_MAX_PORT_PARAMS; i++) {
        if (sdp_getchoosetok(port_ptr, &port_ptr, "/ \t", &result) == TRUE) {
            num[i] = SDP_CHOOSE_PARAM;
        } else {
            num[i] = sdp_getnextnumtok(port_ptr, (const char **)&port_ptr,
                                       "/ \t", &result);
            if (result != SDP_SUCCESS) {
                break;
            }
        }
        num_port_params++;
    }

    /* Find the transport protocol type. */
    ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), " \t", &result);
    if (result != SDP_SUCCESS) {
        sdp_parse_error(sdp_p,
            "%s No transport protocol type specified, "
            "parse failed.", sdp_p->debug_str);
        SDP_FREE(mca_p);
        sdp_p->conf_p->num_invalid_param++;
        return (SDP_INVALID_PARAMETER);
    }
    mca_p->transport = SDP_TRANSPORT_UNSUPPORTED;
    for (i=0; i < SDP_MAX_TRANSPORT_TYPES; i++) {
        if (cpr_strncasecmp(tmp, sdp_transport[i].name,
                        sdp_transport[i].strlen) == 0) {
            mca_p->transport = (sdp_transport_e)i;
            break;
        }
    }

    /* TODO(ehugg): Remove this next block when backward
       compatibility with versions earlier than FF24
       is no longer required.  See Bug 886134 */
#define DATACHANNEL_OLD_TRANSPORT "SCTP/DTLS"
    if (mca_p->transport == SDP_TRANSPORT_UNSUPPORTED) {
        if (cpr_strncasecmp(tmp, DATACHANNEL_OLD_TRANSPORT,
                            strlen(DATACHANNEL_OLD_TRANSPORT)) == 0) {
            /* Map the legacy "SCTP/DTLS" token to the current value. */
            mca_p->transport = SDP_TRANSPORT_DTLSSCTP;
        }
    }

    if (mca_p->transport == SDP_TRANSPORT_UNSUPPORTED) {
        /* If we don't recognize or don't support the transport type,
         * just store the first num as the port.
         */
        mca_p->port = num[0];
        sdp_parse_error(sdp_p,
            "%s Warning: Transport protocol type unsupported "
            "(%s).", sdp_p->debug_str, tmp);
    }

    /* Check for each of the possible port formats according to the
     * type of transport protocol specified.
     */
    valid_param = FALSE;
    switch (num_port_params) {
    case 1:
        /* Single value: plain port for most transports, vcci for
         * AAL1/AVP. */
        if ((mca_p->transport == SDP_TRANSPORT_RTPAVP) ||
            (mca_p->transport == SDP_TRANSPORT_RTPSAVP) ||
            (mca_p->transport == SDP_TRANSPORT_RTPSAVPF) ||
            (mca_p->transport == SDP_TRANSPORT_UDPTLSRTPSAVP) ||
            (mca_p->transport == SDP_TRANSPORT_UDPTLSRTPSAVPF) ||
            (mca_p->transport == SDP_TRANSPORT_TCPTLSRTPSAVP) ||
            (mca_p->transport == SDP_TRANSPORT_TCPTLSRTPSAVPF) ||
            (mca_p->transport == SDP_TRANSPORT_UDP) ||
            (mca_p->transport == SDP_TRANSPORT_TCP) ||
            (mca_p->transport == SDP_TRANSPORT_UDPTL) ||
            (mca_p->transport == SDP_TRANSPORT_UDPSPRT) ||
            (mca_p->transport == SDP_TRANSPORT_LOCAL) ||
            (mca_p->transport == SDP_TRANSPORT_DTLSSCTP)) {
            /* Port format is simply <port>.  Make sure that either
             * the choose param is allowed or that the choose value
             * wasn't specified.
             */
            if ((sdp_p->conf_p->allow_choose[SDP_CHOOSE_PORTNUM]) ||
                (num[0] != SDP_CHOOSE_PARAM)) {
                mca_p->port = num[0];
                mca_p->port_format = SDP_PORT_NUM_ONLY;
                valid_param = TRUE;
            }
        } else if (mca_p->transport == SDP_TRANSPORT_AAL1AVP) {
            /* Port format is simply <vcci>, choose param is not allowed.
             */
            if (num[0] != SDP_CHOOSE_PARAM) {
                mca_p->vcci = num[0];
                mca_p->port_format = SDP_PORT_VCCI;
                valid_param = TRUE;
            }
        } else if ((mca_p->transport == SDP_TRANSPORT_AAL2_ITU) ||
                   (mca_p->transport == SDP_TRANSPORT_AAL2_ATMF) ||
                   (mca_p->transport == SDP_TRANSPORT_AAL2_CUSTOM)) {
            /* Port format is simply <port>, and choose param is allowed,
             * according to AAL2 definitions.
             */
            mca_p->port = num[0];
            mca_p->port_format = SDP_PORT_NUM_ONLY;
            valid_param = TRUE;
        }
        break;
    case 2:
        /* Two values: port/count, vpi/vci, or vcci/cid depending on
         * the transport. */
        if ((mca_p->transport == SDP_TRANSPORT_RTPAVP) ||
            (mca_p->transport == SDP_TRANSPORT_RTPSAVP) ||
            (mca_p->transport == SDP_TRANSPORT_RTPSAVPF) ||
            (mca_p->transport == SDP_TRANSPORT_UDPTLSRTPSAVP) ||
            (mca_p->transport == SDP_TRANSPORT_UDPTLSRTPSAVPF) ||
            (mca_p->transport == SDP_TRANSPORT_TCPTLSRTPSAVP) ||
            (mca_p->transport == SDP_TRANSPORT_TCPTLSRTPSAVPF) ||
            (mca_p->transport == SDP_TRANSPORT_UDP) ||
            (mca_p->transport == SDP_TRANSPORT_LOCAL)) {
            /* Port format is <port>/<num of ports>.  Make sure choose
             * params were not specified.
             */
            if ((num[0] != SDP_CHOOSE_PARAM) &&
                (num[1] != SDP_CHOOSE_PARAM)) {
                mca_p->port = num[0];
                mca_p->num_ports = num[1];
                mca_p->port_format = SDP_PORT_NUM_COUNT;
                valid_param = TRUE;
            }
        } else if (mca_p->transport == SDP_TRANSPORT_UDPTL) {
            /* Port format is <port>/<num of ports>.  Make sure choose
             * params were not specified.  For UDPTL, only "1" may
             * be specified for number of ports.
             */
            if ((num[0] != SDP_CHOOSE_PARAM) &&
                (num[1] == 1)) {
                mca_p->port = num[0];
                mca_p->num_ports = 1;
                mca_p->port_format = SDP_PORT_NUM_COUNT;
                valid_param = TRUE;
            }
        } else if (mca_p->transport == SDP_TRANSPORT_CES10) {
            /* Port format is <vpi>/<vci>.  Make sure choose
             * params were not specified.
             */
            if ((num[0] != SDP_CHOOSE_PARAM) &&
                (num[1] != SDP_CHOOSE_PARAM)) {
                mca_p->vpi = num[0];
                mca_p->vci = num[1];
                mca_p->port_format = SDP_PORT_VPI_VCI;
                valid_param = TRUE;
            }
        } else if ((mca_p->transport == SDP_TRANSPORT_AAL2_ITU) ||
                   (mca_p->transport == SDP_TRANSPORT_AAL2_ATMF) ||
                   (mca_p->transport == SDP_TRANSPORT_AAL2_CUSTOM)) {
            /* Port format is either <vcci>/<cid> or $/$.  If one
             * param is '$' the other must be also.  The choose params
             * are allowed by default and don't need to be allowed
             * through the appl config.
             */
            if (((num[0] != SDP_CHOOSE_PARAM) &&
                 (num[1] != SDP_CHOOSE_PARAM)) ||
                ((num[0] == SDP_CHOOSE_PARAM) &&
                 (num[1] == SDP_CHOOSE_PARAM))) {
                mca_p->vcci = num[0];
                mca_p->cid = num[1];
                mca_p->port_format = SDP_PORT_VCCI_CID;
                valid_param = TRUE;
            }
        }
        break;
    case 3:
        if (mca_p->transport == SDP_TRANSPORT_AAL1AVP) {
            /* Port format is <port>/<vpi>/<vci>.  Make sure choose
             * params were not specified.
             */
            if ((num[0] != SDP_CHOOSE_PARAM) &&
                (num[1] != SDP_CHOOSE_PARAM) &&
                (num[2] != SDP_CHOOSE_PARAM)) {
                mca_p->port = num[0];
                mca_p->vpi = num[1];
                mca_p->vci = num[2];
                mca_p->port_format = SDP_PORT_NUM_VPI_VCI;
                valid_param = TRUE;
            }
        }
        break;
    case 4:
        if ((mca_p->transport == SDP_TRANSPORT_AAL2_ITU) ||
            (mca_p->transport == SDP_TRANSPORT_AAL2_ATMF) ||
            (mca_p->transport == SDP_TRANSPORT_AAL2_CUSTOM)) {
            /* Port format is <port>/<vpi>/<vci>/<cid>.  Make sure choose
             * params were not specified.
             */
            if ((num[0] != SDP_CHOOSE_PARAM) &&
                (num[1] != SDP_CHOOSE_PARAM) &&
                (num[2] != SDP_CHOOSE_PARAM) &&
                (num[3] != SDP_CHOOSE_PARAM)) {
                mca_p->port = num[0];
                mca_p->vpi = num[1];
                mca_p->vci = num[2];
                mca_p->cid = num[3];
                mca_p->port_format = SDP_PORT_NUM_VPI_VCI_CID;
                valid_param = TRUE;
            }
        }
        break;
    }
    if (valid_param == FALSE) {
        sdp_parse_error(sdp_p,
            "%s Invalid port format (%s) specified for transport "
            "protocol (%s), parse failed.", sdp_p->debug_str,
            port, sdp_get_transport_name(mca_p->transport));
        sdp_p->conf_p->num_invalid_param++;
        SDP_FREE(mca_p);
        return (SDP_INVALID_PARAMETER);
    }

    /* Find payload formats. AAL2 media lines allow multiple
     * transport/profile types per line, so these are handled differently. */
    if ((mca_p->transport == SDP_TRANSPORT_AAL2_ITU) ||
        (mca_p->transport == SDP_TRANSPORT_AAL2_ATMF) ||
        (mca_p->transport == SDP_TRANSPORT_AAL2_CUSTOM)) {

        if (sdp_parse_multiple_profile_payload_types(sdp_p, mca_p, ptr) !=
            SDP_SUCCESS) {
            sdp_p->conf_p->num_invalid_param++;
            SDP_FREE(mca_p);
            return (SDP_INVALID_PARAMETER);
        }
        /* Parse DTLS/SCTP port */
    } else if (mca_p->transport == SDP_TRANSPORT_DTLSSCTP) {
        ptr = sdp_getnextstrtok(ptr, port, sizeof(port), " \t", &result);
        if (result != SDP_SUCCESS) {
            sdp_parse_error(sdp_p,
                "%s No sctp port specified in m= media line, "
                "parse failed.", sdp_p->debug_str);
            SDP_FREE(mca_p);
            sdp_p->conf_p->num_invalid_param++;
            return (SDP_INVALID_PARAMETER);
        }
        port_ptr = port;

        /* NOTE(review): when the sctp port is the choose token ('$'),
         * sctp_port is set to SDP_CHOOSE_PARAM but mca_p->sctpport is
         * only assigned in the else branch below, so the choose value
         * is never stored - confirm whether that is intentional. */
        if (sdp_getchoosetok(port_ptr, &port_ptr, "/ \t", &result)) {
            sctp_port = SDP_CHOOSE_PARAM;
        } else {
            sctp_port = sdp_getnextnumtok(port_ptr, (const char **)&port_ptr,
                                          "/ \t", &result);
            if (result != SDP_SUCCESS) {
                sdp_parse_error(sdp_p,
                    "%s No sctp port specified in m= media line, "
                    "parse failed.", sdp_p->debug_str);
                SDP_FREE(mca_p);
                sdp_p->conf_p->num_invalid_param++;
                return (SDP_INVALID_PARAMETER);
            }
            mca_p->sctpport = sctp_port;
        }
    } else {
        /* Transport is a non-AAL2 type and not SCTP.  Parse payloads
           normally. */
        sdp_parse_payload_types(sdp_p, mca_p, ptr);
    }


    /* Media line params are valid. Add it into the SDP. */
    sdp_p->mca_count++;
    if (sdp_p->mca_p == NULL) {
        sdp_p->mca_p = mca_p;
    } else {
        for (next_mca_p = sdp_p->mca_p; next_mca_p->next_p != NULL;
             next_mca_p = next_mca_p->next_p) {
            ; // Empty For
        }
        next_mca_p->next_p = mca_p;
    }

    if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {

        SDP_PRINT("%s Parsed media type %s, ", sdp_p->debug_str,
                  sdp_get_media_name(mca_p->media));
        switch (mca_p->port_format) {
        case SDP_PORT_NUM_ONLY:
            SDP_PRINT("Port num %d, ", mca_p->port);
            break;

        case SDP_PORT_NUM_COUNT:
            SDP_PRINT("Port num %d, count %d, ",
                      mca_p->port, mca_p->num_ports);
            break;
        case SDP_PORT_VPI_VCI:
            SDP_PRINT("VPI/VCI %d/%u, ", mca_p->vpi, mca_p->vci);
            break;
        case SDP_PORT_VCCI:
            SDP_PRINT("VCCI %d, ", mca_p->vcci);
            break;
        case SDP_PORT_NUM_VPI_VCI:
            SDP_PRINT("Port %d, VPI/VCI %d/%u, ", mca_p->port,
                      mca_p->vpi, mca_p->vci);
            break;
        case SDP_PORT_VCCI_CID:
            SDP_PRINT("VCCI %d, CID %d, ", mca_p->vcci, mca_p->cid);
            break;
        case SDP_PORT_NUM_VPI_VCI_CID:
            SDP_PRINT("Port %d, VPI/VCI %d/%u, CID %d, ", mca_p->port,
                      mca_p->vpi, mca_p->vci, mca_p->cid);
            break;
        default:
            SDP_PRINT("Port format not valid, ");
            break;
        }

        if ((mca_p->transport >= SDP_TRANSPORT_AAL2_ITU) &&
            (mca_p->transport <= SDP_TRANSPORT_AAL2_CUSTOM)) {
            for (i=0; i < mca_p->media_profiles_p->num_profiles; i++) {
                SDP_PRINT("Profile %s, Num payloads %u ",
                          sdp_get_transport_name(mca_p->media_profiles_p->profile[i]),
                          (unsigned)mca_p->media_profiles_p->num_payloads[i]);
            }
        } else {
            SDP_PRINT("Transport %s, Num payloads %u",
                      sdp_get_transport_name(mca_p->transport),
                      (unsigned)mca_p->num_payloads);
        }
    }
    return (SDP_SUCCESS);
}
+
/* Function: sdp_build_media
 * Description: Serialize the m= media line at the given level into fs.
 *              The port is rendered according to port_format; AAL2
 *              transports emit one "<profile> <payloads>" group per
 *              stored profile, DTLS/SCTP emits the sctp port, and all
 *              other transports emit the payload type list.
 * Parameters:  sdp_p  The SDP handle returned by sdp_init_description.
 *              level  The media level (m= line) to build.
 *              fs     The flex_string the line is appended to.
 * Returns:     SDP_SUCCESS, SDP_FAILURE (level not found), or
 *              SDP_INVALID_PARAMETER.
 */
sdp_result_e sdp_build_media (sdp_t *sdp_p, uint16_t level, flex_string *fs)
{
    int i, j;
    sdp_mca_t *mca_p;
    tinybool invalid_params=FALSE;
    sdp_media_profiles_t *profile_p;

    /* Find the right media line */
    mca_p = sdp_find_media_level(sdp_p, level);
    if (mca_p == NULL) {
        return (SDP_FAILURE);
    }

    /* Validate params for this media line */
    if ((mca_p->media >= SDP_MAX_MEDIA_TYPES) ||
        (mca_p->port_format >= SDP_MAX_PORT_FORMAT_TYPES) ||
        (mca_p->transport >= SDP_MAX_TRANSPORT_TYPES)) {
        invalid_params = TRUE;
    }

    if (invalid_params == TRUE) {
        CSFLogError(logTag, "%s Invalid params for m= media line, "
                    "build failed.", sdp_p->debug_str);
        sdp_p->conf_p->num_invalid_param++;
        return (SDP_INVALID_PARAMETER);
    }

    /* Build the media type */
    flex_string_sprintf(fs, "m=%s ", sdp_get_media_name(mca_p->media));

    /* Build the port based on the specified port format.
     * SDP_CHOOSE_PARAM fields are rendered as "$". */
    if (mca_p->port_format == SDP_PORT_NUM_ONLY) {
        if (mca_p->port == SDP_CHOOSE_PARAM) {
            flex_string_sprintf(fs, "$ ");
        } else {
            flex_string_sprintf(fs, "%u ", (unsigned)mca_p->port);
        }
    } else if (mca_p->port_format == SDP_PORT_NUM_COUNT) {
        flex_string_sprintf(fs, "%u/%u ", (unsigned)mca_p->port,
                            (unsigned)mca_p->num_ports);
    } else if (mca_p->port_format == SDP_PORT_VPI_VCI) {
        flex_string_sprintf(fs, "%u/%u ",
                            (unsigned)mca_p->vpi, (unsigned)mca_p->vci);
    } else if (mca_p->port_format == SDP_PORT_VCCI) {
        flex_string_sprintf(fs, "%u ", (unsigned)mca_p->vcci);
    } else if (mca_p->port_format == SDP_PORT_NUM_VPI_VCI) {
        flex_string_sprintf(fs, "%u/%u/%u ", (unsigned)mca_p->port,
                            (unsigned)mca_p->vpi, (unsigned)mca_p->vci);
    } else if (mca_p->port_format == SDP_PORT_VCCI_CID) {
        if ((mca_p->vcci == SDP_CHOOSE_PARAM) &&
            (mca_p->cid == SDP_CHOOSE_PARAM)) {
            flex_string_sprintf(fs, "$/$ ");
        } else if ((mca_p->vcci == SDP_CHOOSE_PARAM) ||
                   (mca_p->cid == SDP_CHOOSE_PARAM)) {
            /* If one is set but not the other, this is an error. */
            CSFLogError(logTag, "%s Invalid params for m= port parameter, "
                        "build failed.", sdp_p->debug_str);
            sdp_p->conf_p->num_invalid_param++;
            return (SDP_INVALID_PARAMETER);
        } else {
            flex_string_sprintf(fs, "%u/%u ",
                                (unsigned)mca_p->vcci, (unsigned)mca_p->cid);
        }
    } else if (mca_p->port_format == SDP_PORT_NUM_VPI_VCI_CID) {
        flex_string_sprintf(fs, "%u/%u/%u/%u ", (unsigned)mca_p->port,
                            (unsigned)mca_p->vpi, (unsigned)mca_p->vci, (unsigned)mca_p->cid);
    }

    /* If the media line has AAL2 profiles, build them differently. */
    if ((mca_p->transport == SDP_TRANSPORT_AAL2_ITU) ||
        (mca_p->transport == SDP_TRANSPORT_AAL2_ATMF) ||
        (mca_p->transport == SDP_TRANSPORT_AAL2_CUSTOM)) {
        profile_p = mca_p->media_profiles_p;
        for (i=0; i < profile_p->num_profiles; i++) {
            flex_string_sprintf(fs, "%s",
                                sdp_get_transport_name(profile_p->profile[i]));

            for (j=0; j < profile_p->num_payloads[i]; j++) {
                flex_string_sprintf(fs, " %u",
                                    (unsigned)profile_p->payload_type[i][j]);
            }
            flex_string_sprintf(fs, " ");
        }
        /* NOTE(review): this path terminates the line with "\n" while
         * the non-AAL2 path below uses "\r\n" -- looks inconsistent;
         * confirm which terminator is intended before changing. */
        flex_string_sprintf(fs, "\n");
        if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
            SDP_PRINT("%s Built m= media line", sdp_p->debug_str);
        }
        return (SDP_SUCCESS);
    }

    /* Build the transport name */
    flex_string_sprintf(fs, "%s",
                        sdp_get_transport_name(mca_p->transport));

    if(mca_p->transport != SDP_TRANSPORT_DTLSSCTP) {

        /* Build the format lists */
        for (i=0; i < mca_p->num_payloads; i++) {
            if (mca_p->payload_indicator[i] == SDP_PAYLOAD_ENUM) {
                flex_string_sprintf(fs, " %s",
                                    sdp_get_payload_name((sdp_payload_e)mca_p->payload_type[i]));
            } else {
                flex_string_sprintf(fs, " %u", (unsigned)mca_p->payload_type[i]);
            }
        }
    } else {
        /* Add port to SDP if transport is DTLS/SCTP */
        flex_string_sprintf(fs, " %u", (unsigned)mca_p->sctpport);
    }

    flex_string_sprintf(fs, "\r\n");

    if (sdp_p->debug_flag[SDP_DEBUG_TRACE]) {
        SDP_PRINT("%s Built m= media line", sdp_p->debug_str);
    }
    return (SDP_SUCCESS);
}
+
+
/* Function: sdp_parse_payload_types
 * Description: Parse a list of payload types. The list may be part of
 *              a media line or part of a capability line.
 * Parameters:  sdp_p   The SDP handle returned by sdp_init_description.
 *              mca_p   The mca structure the payload types should be
 *                      added to.
 *              ptr     The pointer to the list of payloads.
 * Returns:     Nothing.  Accepted payloads are appended to
 *              mca_p->payload_type[]/payload_indicator[] and
 *              mca_p->num_payloads is advanced; rejected tokens only
 *              produce parse warnings.
 */
void sdp_parse_payload_types (sdp_t *sdp_p, sdp_mca_t *mca_p, const char *ptr)
{
    uint16_t i;
    uint16_t num_payloads;
    sdp_result_e result;
    tinybool valid_payload;
    char tmp[SDP_MAX_STRING_LEN];
    char *tmp2;

    /* Walk the whitespace-separated tokens; stop at end of line or when
     * the payload array is full. */
    for (num_payloads = 0; (num_payloads < SDP_MAX_PAYLOAD_TYPES); ) {
        ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), " \t", &result);
        if (result != SDP_SUCCESS) {
            /* If there are no more payload types, we're finished */
            break;
        }
        /* Try the token as a numeric payload type first. */
        mca_p->payload_type[num_payloads] = (uint16_t)sdp_getnextnumtok(tmp,
                                                (const char **)&tmp2,
                                                " \t", &result);
        if (result == SDP_SUCCESS) {
            /* Numeric payloads are rejected for image/udptl (T.38
             * expects a named payload); the token is skipped. */
            if ((mca_p->media == SDP_MEDIA_IMAGE) &&
                (mca_p->transport == SDP_TRANSPORT_UDPTL)) {
                sdp_parse_error(sdp_p,
                    "%s Warning: Numeric payload type not "
                    "valid for media %s with transport %s.",
                    sdp_p->debug_str,
                    sdp_get_media_name(mca_p->media),
                    sdp_get_transport_name(mca_p->transport));
            } else {
                mca_p->payload_indicator[num_payloads] = SDP_PAYLOAD_NUMERIC;
                mca_p->num_payloads++;
                num_payloads++;
            }
            continue;
        }

        /* Not numeric: try to match a known string payload name. */
        valid_payload = FALSE;
        for (i=0; i < SDP_MAX_STRING_PAYLOAD_TYPES; i++) {
            if (cpr_strncasecmp(tmp, sdp_payload[i].name,
                                sdp_payload[i].strlen) == 0) {
                valid_payload = TRUE;
                break;
            }
        }
        if (valid_payload == TRUE) {
            /* We recognized the payload type. Make sure it
             * is valid for this media line. */
            valid_payload = FALSE;
            if ((mca_p->media == SDP_MEDIA_IMAGE) &&
                (mca_p->transport == SDP_TRANSPORT_UDPTL) &&
                (i == SDP_PAYLOAD_T38)) {
                valid_payload = TRUE;
            } else if ((mca_p->media == SDP_MEDIA_APPLICATION) &&
                       (mca_p->transport == SDP_TRANSPORT_UDP) &&
                       (i == SDP_PAYLOAD_XTMR)) {
                valid_payload = TRUE;
            } else if ((mca_p->media == SDP_MEDIA_APPLICATION) &&
                       (mca_p->transport == SDP_TRANSPORT_TCP) &&
                       (i == SDP_PAYLOAD_T120)) {
                valid_payload = TRUE;
            }

            if (valid_payload == TRUE) {
                mca_p->payload_indicator[num_payloads] = SDP_PAYLOAD_ENUM;
                mca_p->payload_type[num_payloads] = i;
                mca_p->num_payloads++;
                num_payloads++;
            } else {
                sdp_parse_error(sdp_p,
                    "%s Warning: Payload type %s not valid for "
                    "media %s with transport %s.",
                    sdp_p->debug_str,
                    sdp_get_payload_name((sdp_payload_e)i),
                    sdp_get_media_name(mca_p->media),
                    sdp_get_transport_name(mca_p->transport));
            }
        } else {
            /* Payload type wasn't recognized. */
            sdp_parse_error(sdp_p,
                "%s Warning: Payload type "
                "unsupported (%s).", sdp_p->debug_str, tmp);
        }
    }
    if (mca_p->num_payloads == 0) {
        sdp_parse_error(sdp_p,
            "%s Warning: No payload types specified.",
            sdp_p->debug_str);
    }
}
+
+
+/* Function: sdp_parse_multiple_profile_payload_types
+ * Description: Parse a list of payload types. The list may be part of
+ * a media line or part of a capability line.
+ * Parameters: sdp_ptr The SDP handle returned by sdp_init_description.
+ * mca_p The mca structure the payload types should be
+ * added to.
+ * ptr The pointer to the list of payloads.
+ * Returns: Nothing.
+ */
+sdp_result_e sdp_parse_multiple_profile_payload_types (sdp_t *sdp_p,
+ sdp_mca_t *mca_p,
+ const char *ptr)
+{
+ uint16_t i;
+ uint16_t prof;
+ uint16_t payload;
+ sdp_result_e result;
+ sdp_media_profiles_t *profile_p;
+ char tmp[SDP_MAX_STRING_LEN];
+ char *tmp2;
+
+ /* If the transport type is any of the AAL2 formats, then we
+ * need to look for multiple AAL2 profiles and their associated
+ * payload lists. */
+ mca_p->media_profiles_p = (sdp_media_profiles_t *) \
+ SDP_MALLOC(sizeof(sdp_media_profiles_t));
+ if (mca_p->media_profiles_p == NULL) {
+ sdp_p->conf_p->num_no_resource++;
+ SDP_FREE(mca_p);
+ return (SDP_NO_RESOURCE);
+ }
+ profile_p = mca_p->media_profiles_p;
+ /* Set the first profile to the one already detected. */
+ profile_p->num_profiles = 1;
+ prof = 0;
+ payload = 0;
+ profile_p->profile[prof] = mca_p->transport;
+ profile_p->num_payloads[prof] = 0;
+
+ /* Now find the payload type lists and any other profile types */
+ while (TRUE) {
+ ptr = sdp_getnextstrtok(ptr, tmp, sizeof(tmp), " \t", &result);
+ if (result != SDP_SUCCESS) {
+ /* If there are no more payload types, we're finished */
+ break;
+ }
+
+ /* See if the next token is a new profile type. */
+ if (prof < SDP_MAX_PROFILES) {
+ profile_p->profile[prof+1] = SDP_TRANSPORT_UNSUPPORTED;
+ for (i=SDP_TRANSPORT_AAL2_ITU;
+ i <= SDP_TRANSPORT_AAL2_CUSTOM; i++) {
+ if (cpr_strncasecmp(tmp, sdp_transport[i].name,
+ sdp_transport[i].strlen) == 0) {
+ profile_p->profile[prof+1] = (sdp_transport_e)i;
+ break;
+ }
+ }
+ /* If we recognized the profile type, start looking for the
+ * next payload list. */
+ if (profile_p->profile[prof+1] != SDP_TRANSPORT_UNSUPPORTED) {
+ /* Now reset the payload counter for the next profile type. */
+ payload = 0;
+ prof++;
+ profile_p->num_profiles++;
+ if (prof < SDP_MAX_PROFILES) {
+ profile_p->num_payloads[prof] = 0;
+ }
+ continue;
+ }
+ }
+
+ /* This token must be a payload type. Make sure there aren't
+ * too many payload types. */
+ if (payload >= SDP_MAX_PAYLOAD_TYPES) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: Too many payload types "
+ "found, truncating.", sdp_p->debug_str);
+ continue;
+ }
+
+ /* See if the payload type is numeric. */
+ if (prof < SDP_MAX_PROFILES && payload < SDP_MAX_PAYLOAD_TYPES) {
+ profile_p->payload_type[prof][payload] = (uint16_t)sdp_getnextnumtok(tmp,
+ (const char **)&tmp2,
+ " \t", &result);
+ if (result == SDP_SUCCESS) {
+ profile_p->payload_indicator[prof][payload] = SDP_PAYLOAD_NUMERIC;
+ profile_p->num_payloads[prof]++;
+ payload++;
+ continue;
+ }
+ }
+
+ /* No string payload types are currently valid for the AAL2
+ * transport types. This support can be added when needed. */
+ sdp_parse_error(sdp_p,
+ "%s Warning: Unsupported payload type "
+ "found (%s).", sdp_p->debug_str, tmp);
+ }
+ for (i=0; i < profile_p->num_profiles; i++) {
+ /* Make sure we have payloads for each profile type. */
+ if (profile_p->num_payloads[i] == 0) {
+ sdp_parse_error(sdp_p,
+ "%s Warning: No payload types specified "
+ "for AAL2 profile %s.", sdp_p->debug_str,
+ sdp_get_transport_name(profile_p->profile[i]));
+ }
+ }
+ return (SDP_SUCCESS);
+}
diff --git a/media/webrtc/signaling/src/sdp/sipcc/sdp_utils.c b/media/webrtc/signaling/src/sdp/sipcc/sdp_utils.c
new file mode 100644
index 000000000..a02035c72
--- /dev/null
+++ b/media/webrtc/signaling/src/sdp/sipcc/sdp_utils.c
@@ -0,0 +1,781 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <errno.h>
+#include <limits.h>
+#include <ctype.h>
+#include "sdp_os_defs.h"
+#include "sdp.h"
+#include "sdp_private.h"
+
+#include "CSFLog.h"
+
+#define MKI_BUF_LEN 4
+
+static const char* logTag = "sdp_utils";
+
+sdp_mca_t *sdp_alloc_mca (uint32_t line) {
+ sdp_mca_t *mca_p;
+
+ /* Allocate resource for new media stream. */
+ mca_p = (sdp_mca_t *)SDP_MALLOC(sizeof(sdp_mca_t));
+ if (mca_p == NULL) {
+ return (NULL);
+ }
+ /* Initialize mca structure */
+ mca_p->media = SDP_MEDIA_INVALID;
+ mca_p->conn.nettype = SDP_NT_INVALID;
+ mca_p->conn.addrtype = SDP_AT_INVALID;
+ mca_p->conn.conn_addr[0] = '\0';
+ mca_p->conn.is_multicast = FALSE;
+ mca_p->conn.ttl = 0;
+ mca_p->conn.num_of_addresses = 0;
+ mca_p->transport = SDP_TRANSPORT_INVALID;
+ mca_p->port = SDP_INVALID_VALUE;
+ mca_p->num_ports = SDP_INVALID_VALUE;
+ mca_p->vpi = SDP_INVALID_VALUE;
+ mca_p->vci = 0;
+ mca_p->vcci = SDP_INVALID_VALUE;
+ mca_p->cid = SDP_INVALID_VALUE;
+ mca_p->num_payloads = 0;
+ mca_p->sessinfo_found = FALSE;
+ mca_p->encrypt.encrypt_type = SDP_ENCRYPT_INVALID;
+ mca_p->media_attrs_p = NULL;
+ mca_p->next_p = NULL;
+ mca_p->mid = 0;
+ mca_p->bw.bw_data_count = 0;
+ mca_p->bw.bw_data_list = NULL;
+ mca_p->line_number = line;
+
+ return (mca_p);
+}
+
+/*
+ * next_token
+ *
+ * copy token param with chars from str until null, cr, lf, or one of the delimiters is found.
+ * delimiters at the beginning will be skipped.
+ * The pointer *string_of_tokens is moved forward to the next token on sucess.
+ *
+ */
+static sdp_result_e next_token(const char **string_of_tokens, char *token, unsigned token_max_len, const char *delim)
+{
+ int flag2moveon = 0;
+ const char *str;
+ const char *token_end;
+ const char *next_delim;
+
+ if (!string_of_tokens || !*string_of_tokens || !token || !delim) {
+ return SDP_FAILURE;
+ }
+
+ str = *string_of_tokens;
+ token_end = token + token_max_len - 1;
+
+ /* Locate front of token, skipping any delimiters */
+ for ( ; ((*str != '\0') && (*str != '\n') && (*str != '\r')); str++) {
+ flag2moveon = 1; /* Default to move on unless we find a delimiter */
+ for (next_delim=delim; *next_delim; next_delim++) {
+ if (*str == *next_delim) {
+ flag2moveon = 0;
+ break;
+ }
+ }
+ if( flag2moveon ) {
+ break; /* We're at the beginning of the token */
+ }
+ }
+
+ /* Make sure there's really a token present. */
+ if ((*str == '\0') || (*str == '\n') || (*str == '\r')) {
+ return SDP_EMPTY_TOKEN;
+ }
+
+ /* Now locate end of token */
+ flag2moveon = 0;
+
+ while ((token < token_end) &&
+ (*str != '\0') && (*str != '\n') && (*str != '\r')) {
+ for (next_delim=delim; *next_delim; next_delim++) {
+ if (*str == *next_delim) {
+ flag2moveon = 1;
+ break;
+ }
+ }
+ if( flag2moveon ) {
+ break;
+ } else {
+ *token++ = *str++;
+ }
+ }
+
+ /* mark end of token */
+ *token = '\0';
+
+ /* set the string of tokens to the next token */
+ *string_of_tokens = str;
+
+ return SDP_SUCCESS;
+}
+
/*
 * verify_sdescriptions_mki
 *
 * Verifies the syntax of the MKI parameter.
 *
 * mki = mki-value ":" mki-length
 * mki-value = 1*DIGIT
 * mki-length = 1*3DIGIT ; range 1..128
 *
 * Inputs:
 *   buf      - ptr to start of MKI string assumes NULL
 *              terminated string
 *   mkiValue - buffer to store the MKI value, assumes calling
 *              function has provided memory for this.
 *   mkiLen   - integer to store the MKI length
 *
 * Outputs:
 *   Returns TRUE if syntax is correct and stores the
 *   MKI value in mkiVal and stores the length in mkiLen.
 *   Returns FALSE otherwise.
 */

tinybool
verify_sdescriptions_mki (char *buf, char *mkiVal, uint16_t *mkiLen)
{

    char *ptr,
         mkiValBuf[SDP_SRTP_MAX_MKI_SIZE_BYTES],
         mkiLenBuf[MKI_BUF_LEN];
    int idx = 0;
    unsigned long strtoul_result;
    char *strtoul_end;

    ptr = buf;
    /* MKI must begin with a digit */
    if (!ptr || (!isdigit((int) *ptr))) {
        return FALSE;
    }

    /* scan until we reach a non-digit or colon */
    while (*ptr) {
        if (*ptr == ':') {
            /* terminate the MKI value */
            mkiValBuf[idx] = 0;
            ptr++;
            break;
        } else if ((isdigit((int) *ptr) && (idx < SDP_SRTP_MAX_MKI_SIZE_BYTES-1))) {
            mkiValBuf[idx++] = *ptr;
        } else {
            /* non-digit before the colon, or the value overran the buffer */
            return FALSE;
        }

        ptr++;
    }

    /* there has to be a mki length */
    if (*ptr == 0) {
        return FALSE;
    }

    idx = 0;

    /* verify the mki length (max 3 digits) */
    while (*ptr) {
        if (isdigit((int) *ptr) && (idx < 3)) {
            mkiLenBuf[idx++] = *ptr;
        } else {
            return FALSE;
        }

        ptr++;
    }

    mkiLenBuf[idx] = 0;

    errno = 0;
    strtoul_result = strtoul(mkiLenBuf, &strtoul_end, 10);

    /* mki len must be between 1..128 */
    if (errno || mkiLenBuf == strtoul_end || strtoul_result < 1 || strtoul_result > 128) {
        *mkiLen = 0;
        return FALSE;
    }

    *mkiLen = (uint16_t) strtoul_result;
    /* NOTE(review): this copy is bounded by MKI_BUF_LEN (4), so at most
     * 3 digits of the MKI value reach the caller even though mkiValBuf
     * can hold up to SDP_SRTP_MAX_MKI_SIZE_BYTES-1 digits.  Looks
     * suspicious -- confirm the intended size of the mkiVal buffer
     * before widening the copy. */
    sstrncpy(mkiVal, mkiValBuf, MKI_BUF_LEN);

    return TRUE;
}
+
+/*
+ * verify_srtp_lifetime
+ *
+ * Verifies the Lifetime parameter syntax.
+ *
+ * lifetime = ["2^"] 1*(DIGIT)
+ *
+ * Inputs:
+ * buf - pointer to start of lifetime string. Assumes string is
+ * NULL terminated.
+ * Outputs:
+ * Returns TRUE if syntax is correct. Returns FALSE otherwise.
+ */
+
+tinybool
+verify_sdescriptions_lifetime (char *buf)
+{
+
+ char *ptr;
+ tinybool tokenFound = FALSE;
+
+ ptr = buf;
+ if (!ptr || *ptr == 0) {
+ return FALSE;
+ }
+
+ while (*ptr) {
+ if (*ptr == '^') {
+ if (tokenFound) {
+ /* make sure we don't have multiple ^ */
+ return FALSE;
+ } else {
+ tokenFound = TRUE;
+ /* Lifetime is in power of 2 format, make sure first and second
+ * chars are 2^
+ */
+
+ if (buf[0] != '2' || buf[1] != '^') {
+ return FALSE;
+ }
+ }
+ } else if (!isdigit((int) *ptr)) {
+ return FALSE;
+ }
+
+ ptr++;
+
+ }
+
+ /* Make sure if the format is 2^ that there is a number after the ^. */
+ if (tokenFound) {
+ if (strlen(buf) <= 2) {
+ return FALSE;
+ }
+ }
+
+ return TRUE;
+}
+
+
+/*
+ * sdp_validate_maxprate
+ *
+ * This function validates that the string passed in is of the form:
+ * packet-rate = 1*DIGIT ["." 1*DIGIT]
+ */
+tinybool
+sdp_validate_maxprate(const char *string_parm)
+{
+ tinybool retval = FALSE;
+
+ if (string_parm && (*string_parm)) {
+ while (isdigit((int)*string_parm)) {
+ string_parm++;
+ }
+
+ if (*string_parm == '.') {
+ string_parm++;
+ while (isdigit((int)*string_parm)) {
+ string_parm++;
+ }
+ }
+
+ if (*string_parm == '\0') {
+ retval = TRUE;
+ } else {
+ retval = FALSE;
+ }
+ }
+
+ return retval;
+}
+
/* Return a pointer to the first character of ptr that appears in
 * char_list.  If no listed character occurs, return a pointer to ptr's
 * terminating NUL (never NULL). */
char *sdp_findchar (const char *ptr, char *char_list)
{
    /* Cast needed: C++ <string.h> gives strpbrk a const-preserving
     * overload, while this function's contract returns char*. */
    char *hit = (char *) strpbrk(ptr, char_list);

    return hit ? hit : (char *)(ptr + strlen(ptr));
}
+
+/* Locate the next token in a line. The delim characters are passed in
+ * as a param. The token also will not go past a new line char or the
+ * end of the string. Skip any delimiters before the token.
+ */
+const char *sdp_getnextstrtok (const char *str, char *tokenstr, unsigned tokenstr_len,
+ const char *delim, sdp_result_e *result)
+{
+ const char *token_list = str;
+
+ if (!str || !tokenstr || !delim || !result) {
+ if (result) {
+ *result = SDP_FAILURE;
+ }
+ return str;
+ }
+
+ *result = next_token(&token_list, tokenstr, tokenstr_len, delim);
+
+ return token_list;
+}
+
+
+
+/* Locate the next null ("-") or numeric token in a string. The delim
+ * characters are passed in as a param. The token also will not go past
+ * a new line char or the end of the string. Skip any delimiters before
+ * the token.
+ */
uint32_t sdp_getnextnumtok_or_null (const char *str, const char **str_end,
                                    const char *delim, tinybool *null_ind,
                                    sdp_result_e *result)
{
    const char *token_list = str;
    char temp_token[SDP_MAX_STRING_LEN];
    char *strtoul_end;
    unsigned long numval;

    /* Clear the null indicator up front so it is defined even on the
     * early-failure paths below. */
    if (null_ind) {
        *null_ind = FALSE;
    }

    if (!str || !str_end || !delim || !null_ind || !result) {
        if (result) {
            *result = SDP_FAILURE;
        }
        return 0;
    }

    *result = next_token(&token_list, temp_token, sizeof(temp_token), delim);

    if (*result != SDP_SUCCESS) {
        return 0;
    }

    /* First see if its the null char ("-") */
    if (temp_token[0] == '-') {
        *null_ind = TRUE;
        *result = SDP_SUCCESS;
        /* NOTE(review): on this path *str_end is left at the original
         * str rather than advanced past the token (token_list) as on
         * the numeric path below -- confirm callers rely on this. */
        *str_end = str;
        return 0;
    }

    errno = 0;
    numval = strtoul(temp_token, &strtoul_end, 10);

    /* Reject parse errors, ERANGE overflow, and values that do not fit
     * in 32 bits (the numval > UINT_MAX test only has effect where
     * unsigned long is wider than unsigned int). */
    if (errno || strtoul_end == temp_token || numval > UINT_MAX) {
        *result = SDP_FAILURE;
        return 0;
    }

    *result = SDP_SUCCESS;
    *str_end = token_list;
    return (uint32_t) numval;
}
+
+
+/* Locate the next numeric token in a string. The delim characters are
+ * passed in as a param. The token also will not go past a new line char
+ * or the end of the string. Skip any delimiters before the token.
+ */
+uint32_t sdp_getnextnumtok (const char *str, const char **str_end,
+ const char *delim, sdp_result_e *result)
+{
+ const char *token_list = str;
+ char temp_token[SDP_MAX_STRING_LEN];
+ char *strtoul_end;
+ unsigned long numval;
+
+ if (!str || !str_end || !delim || !result) {
+ if (result) {
+ *result = SDP_FAILURE;
+ }
+ return 0;
+ }
+
+ *result = next_token(&token_list, temp_token, sizeof(temp_token), delim);
+
+ if (*result != SDP_SUCCESS) {
+ return 0;
+ }
+
+ errno = 0;
+ numval = strtoul(temp_token, &strtoul_end, 10);
+
+ if (errno || strtoul_end == temp_token || numval > UINT_MAX) {
+ *result = SDP_FAILURE;
+ return 0;
+ }
+
+ *result = SDP_SUCCESS;
+ *str_end = token_list;
+ return (uint32_t) numval;
+}
+
+
+/* See if the next token in a string is the choose character. The delim
+ * characters are passed in as a param. The check also will not go past
+ * a new line char or the end of the string. Skip any delimiters before
+ * the token.
+ */
/* Report whether the next token is the "choose" character '$' (alone,
 * i.e. followed by a delimiter or end of line).  Delimiters before the
 * token are skipped.  *str_end is advanced past the token on a choose
 * match, or left at the token start otherwise. */
tinybool sdp_getchoosetok (const char *str, const char **str_end,
                           const char *delim, sdp_result_e *result)
{
    const char *b;
    int flag2moveon;

    if ((str == NULL) || (str_end == NULL)) {
        *result = SDP_FAILURE;
        return(FALSE);
    }

    /* Locate front of token, skipping any delimiters */
    for ( ; ((*str != '\0') && (*str != '\n') && (*str != '\r')); str++) {
        flag2moveon = 1; /* Default to move on unless we find a delimiter */
        for (b=delim; *b; b++) {
            if (*str == *b) {
                flag2moveon = 0;
                break;
            }
        }
        if( flag2moveon ) {
            break; /* We're at the beginning of the token */
        }
    }

    /* Make sure there's really a token present. */
    if ((*str == '\0') || (*str == '\n') || (*str == '\r')) {
        *result = SDP_FAILURE;
        *str_end = (char *)str;
        return(FALSE);
    }

    /* See if the token is '$' followed by a delimiter char or end of str. */
    if (*str == '$') {
        str++;
        if ((*str == '\0') || (*str == '\n') || (*str == '\r')) {
            *result = SDP_SUCCESS;
            /* skip the choose char in the string. */
            /* NOTE(review): str already points just past '$' here, so
             * str+1 additionally steps over the terminator/newline;
             * when *str is '\0' this leaves *str_end one past the NUL.
             * Confirm callers never dereference it in that case. */
            *str_end = (char *)(str+1);
            return(TRUE);
        }
        for (b=delim; *b; b++) {
            if (*str == *b) {
                *result = SDP_SUCCESS;
                /* skip the choose char in the string. */
                /* NOTE(review): as above, this also consumes the
                 * delimiter that follows the '$'. */
                *str_end = (char *)(str+1);
                return(TRUE);
            }
        }
    }

    /* If the token was not '$' followed by a delim, token is not choose */
    *result = SDP_SUCCESS;
    *str_end = (char *)str;
    return(FALSE);

}
+
+/*
+ * SDP Crypto Utility Functions.
+ *
+ * First a few common definitions.
+ */
+
+/*
+ * Constants
+ *
+ * crypto_string = The string used to identify the start of sensative
+ * crypto data.
+ *
+ * inline_string = The string used to identify the start of key/salt
+ * crypto data.
+ *
+ * star_string = The string used to overwrite sensative data.
+ *
+ * '*_strlen' = The length of '*_string' in bytes (not including '\0')
+ */
static const char crypto_string[] = "X-crypto:";   /* sentinel that precedes sensitive crypto data */
static const int crypto_strlen = sizeof(crypto_string) - 1;
static const char inline_string[] = "inline:";     /* marks the start of key/salt material */
static const int inline_strlen = sizeof(inline_string) - 1;
/* 40 characters is the current maximum for a Base64 encoded key/salt */
static const char star_string[] = "****************************************";
static const int star_strlen = sizeof(star_string) - 1;
+
+/*
+ * MIN_CRYPTO_STRING_SIZE_BYTES = This value defines the minimum
+ * size of a string that could contain a key/salt. This value
+ * is used to skip out of parsing when there is no reasonable
+ * assumption that sensative data will be found. The general
+ * format of a SRTP Key Salt in SDP looks like:
+ *
+ * X-crypto:<crypto_suite_name> inline:<master_key_salt>||
+ *
+ * if <crypto_suite_name> and <master_key_salt> is at least
+ * one character and one space is used before the "inline:",
+ * then this translates to a size of (aligned by collumn from
+ * the format shown above):
+ *
+ * 9+ 1+ 1+7+ 1+ 2 = 21
+ *
+ */
#define MIN_CRYPTO_STRING_SIZE_BYTES 21  /* see the size derivation in the comment above */
+
+/*
+ * Utility macros
+ *
+ * CHAR_IS_WHITESPACE = macro to determine if the passed _test_char
+ * is whitespace.
+ *
+ * SKIP_WHITESPACE = Macro to advance _cptr to the next non-whitespace
+ * character. _cptr will not be advanced past _max_cptr.
+ *
+ * FIND_WHITESPACE = Macro to advance _cptr until whitespace is found.
+ * _cptr will not be advanced past _max_cptr.
+ */
/* Note: the two scanning macros below use inclusive bounds (the
 * character at _max_cptr is examined) and evaluate their arguments
 * more than once -- pass only side-effect-free expressions. */
#define CHAR_IS_WHITESPACE(_test_char) \
((((_test_char)==' ')||((_test_char)=='\t'))?1:0)

#define SKIP_WHITESPACE(_cptr, _max_cptr) \
  while ((_cptr)<=(_max_cptr)) { \
    if (!CHAR_IS_WHITESPACE(*(_cptr))) break; \
    (_cptr)++; \
  }

#define FIND_WHITESPACE(_cptr, _max_cptr) \
  while ((_cptr)<=(_max_cptr)) { \
    if (CHAR_IS_WHITESPACE(*(_cptr))) break; \
    (_cptr)++; \
  }
+
+/* Function: sdp_crypto_debug
+ * Description: Check the passed buffer for sensitive data that should
+ * not be output (such as SRTP Master Key/Salt) and output
+ * the buffer as debug. Sensitive data will be replaced
+ * with the '*' character(s). This function may be used
+ * to display very large buffers so this function ensures
+ * that buginf is not overloaded.
+ * Parameters: buffer pointer to the message buffer to filter.
+ * length_bytes size of message buffer in bytes.
+ * Returns: Nothing.
+ */
void sdp_crypto_debug (char *buffer, ulong length_bytes)
{
    char *current, *start;
    char *last = buffer + length_bytes;
    int result;

    /*
     * For SRTP Master Key/Salt has the form:
     * X-crypto:<crypto_suite_name> inline:<master_key_salt>||
     * Where <master_key_salt> is the data to elide (filter).
     *
     * NOTE(review): the inner scan below uses an inclusive bound
     * (current <= last), so the byte at buffer[length_bytes] can be
     * read; presumably the buffer is NUL-terminated past length_bytes
     * -- confirm.
     */
    for (start=current=buffer;
         current<=last-MIN_CRYPTO_STRING_SIZE_BYTES;
         current++) {
        if ((*current == 'x') || (*current == 'X')) {
            result = cpr_strncasecmp(current, crypto_string, crypto_strlen);
            if (!result) {
                current += crypto_strlen;
                if (current > last) break;

                /* Skip over crypto suite name */
                FIND_WHITESPACE(current, last);

                /* Skip over whitespace */
                SKIP_WHITESPACE(current, last);

                /* identify inline keyword */
                result = cpr_strncasecmp(current, inline_string, inline_strlen);
                if (!result) {
                    int star_count = 0;

                    current += inline_strlen;
                    if (current > last) break;

                    /* Emit everything scanned so far (non-sensitive). */
                    sdp_dump_buffer(start, current - start);

                    /* Hide sensitive key/salt data */
                    while (current<=last) {
                        if (*current == '|' || *current == '\n') {
                            /* Done, print one star per hidden byte */
                            while (star_count > star_strlen) {
                                /*
                                 * This code is only for the case where
                                 * too much base64 data was supplied
                                 */
                                sdp_dump_buffer((char*)star_string, star_strlen);
                                star_count -= star_strlen;
                            }
                            sdp_dump_buffer((char*)star_string, star_count);
                            break;
                        } else {
                            star_count++;
                            current++;
                        }
                    }
                    /* Update start pointer */
                    start=current;
                }
            }
        }
    }

    if (last > start) {
        /* Display remainder of buffer */
        sdp_dump_buffer(start, last - start);
    }
}
+
+/*
+ * sdp_debug_msg_filter
+ *
+ * DESCRIPTION
+ * Check the passed message buffer for sensitive data that should
+ * not be output (such as SRTP Master Key/Salt). Sensitive data
+ * will be replaced with the '*' character(s).
+ *
+ * PARAMETERS
+ * buffer: pointer to the message buffer to filter.
+ *
+ * length_bytes: size of message buffer in bytes.
+ *
+ * RETURN VALUE
+ * The buffer modified.
+ */
char * sdp_debug_msg_filter (char *buffer, ulong length_bytes)
{
    char *current;
    char *last = buffer + length_bytes;
    int result;

    SDP_PRINT("\n%s:%d: Eliding sensitive data from debug output",
              __FILE__, __LINE__);
    /*
     * For SRTP Master Key/Salt has the form:
     * X-crypto:<crypto_suite_name> inline:<master_key_salt>||
     * Where <master_key_salt> is the data to elide (filter).
     *
     * NOTE(review): unlike sdp_crypto_debug this overwrites the
     * caller's buffer in place; the inclusive bound (current <= last)
     * can also touch buffer[length_bytes] -- confirm the buffer is
     * NUL-terminated past length_bytes.
     */
    for (current=buffer;
         current<=last-MIN_CRYPTO_STRING_SIZE_BYTES;
         current++) {
        if ((*current == 'x') || (*current == 'X')) {
            result = cpr_strncasecmp(current, crypto_string, crypto_strlen);
            if (!result) {
                current += crypto_strlen;
                if (current > last) break;

                /* Skip over crypto suite name */
                FIND_WHITESPACE(current, last);

                /* Skip over whitespace */
                SKIP_WHITESPACE(current, last);

                /* identify inline keyword */
                result = cpr_strncasecmp(current, inline_string, inline_strlen);
                if (!result) {
                    current += inline_strlen;
                    if (current > last) break;

                    /* Hide sensitive key/salt data */
                    while (current<=last) {
                        if (*current == '|' || *current == '\n') {
                            /* Done */
                            break;
                        } else {
                            *current = '*';
                            current++;
                        }
                    }
                }
            }
        }
    }

    return buffer;
}
+
+
+/* Function: sdp_checkrange
+ * Description: This checks the range of a ulong value to make sure its
+ * within the range of 0 and 4Gig. stroul cannot be used since
+ * for values greater greater than 4G, stroul will either wrap
+ * around or return ULONG_MAX.
+ * Parameters: sdp_p Pointer to the sdp structure
+ * num The number to check the range for
+ * u_val This variable get populated with the ulong value
+ * if the number is within the range.
+ * Returns: tinybool - returns TRUE if the number passed is within the
+ * range, FALSE otherwise
+ */
tinybool sdp_checkrange (sdp_t *sdp_p, char *num, ulong *u_val)
{
    ulong l_val;
    char *endP = NULL;
    *u_val = 0;

    if (!num || !*num) {
        return FALSE;
    }

    /* strtoul would silently accept a leading '-', so reject it here. */
    if (*num == '-') {
        if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
            CSFLogError(logTag, "%s ERROR: Parameter value is a negative number: %s",
                        sdp_p->debug_str, num);
        }
        return FALSE;
    }

    l_val = strtoul(num, &endP, 10);
    /* NOTE(review): the range checks below run only when the entire
     * string was numeric (*endP == '\0'); input with trailing garbage
     * skips them and is returned as TRUE with the partial value --
     * confirm callers pre-validate the string. */
    if (*endP == '\0') {

        if (l_val > 4294967295UL) {
            if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
                CSFLogError(logTag, "%s ERROR: Parameter value: %s is greater than 4294967295",
                            sdp_p->debug_str, num);
            }
            return FALSE;
        }

        if (l_val == 4294967295UL) {
            /*
             * On certain platforms where ULONG_MAX is equivalent to
             * 4294967295, strtoul will return ULONG_MAX even if the the
             * value of the string is greater than 4294967295. To detect
             * that scenario we make an explicit check here.
             */
            if (strcmp("4294967295", num)) {
                if (sdp_p->debug_flag[SDP_DEBUG_ERRORS]) {
                    CSFLogError(logTag, "%s ERROR: Parameter value: %s is greater than 4294967295",
                                sdp_p->debug_str, num);
                }
                return FALSE;
            }
        }
    }
    *u_val = l_val;
    return TRUE;
}
+
+#undef CHAR_IS_WHITESPACE
+#undef SKIP_WHITESPACE
+#undef FIND_WHITESPACE
diff --git a/media/webrtc/signaling/test/FakeIPC.cpp b/media/webrtc/signaling/test/FakeIPC.cpp
new file mode 100644
index 000000000..767082c29
--- /dev/null
+++ b/media/webrtc/signaling/test/FakeIPC.cpp
@@ -0,0 +1,35 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "FakeIPC.h"
+#include <unistd.h>
+
+// The implementations can't be in the .h file for some annoying reason
+
/* static */ void
PlatformThread:: YieldCurrentThread()
{
  // Fake "yield": block the caller for a full second.  Coarse, but
  // sufficient for the unit tests this stub supports.
  sleep(1);
}
+
namespace base {

// Stub: the tests never need at-exit callbacks, so registrations are
// intentionally dropped.
void AtExitManager::RegisterCallback(AtExitCallbackType func, void* param)
{
}

}
+
+// see atomicops_internals_x86_gcc.h
+// This cheats to get the unittests to build
+
struct AtomicOps_x86CPUFeatureStruct {
  bool field1;  // placeholders: only the symbol's existence matters here
  bool field2;
};

// Definition referenced by atomicops_internals_x86_gcc.h; both CPU
// feature flags are reported as false (unavailable).
struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures = {
  false,
  false,
};
diff --git a/media/webrtc/signaling/test/FakeIPC.h b/media/webrtc/signaling/test/FakeIPC.h
new file mode 100644
index 000000000..e13fc271d
--- /dev/null
+++ b/media/webrtc/signaling/test/FakeIPC.h
@@ -0,0 +1,22 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef FAKE_IPC_H_
+#define FAKE_IPC_H_
+#include <unistd.h>
+
// Minimal stand-in for the Chromium-base PlatformThread, providing only
// what the signaling tests link against (see FakeIPC.cpp).
class PlatformThread {
public:
  static void YieldCurrentThread();
};

namespace base {
// Stub of base::AtExitManager; RegisterCallback is a no-op in FakeIPC.cpp.
class AtExitManager {
public:
  typedef void (*AtExitCallbackType)(void*);

  static void RegisterCallback(AtExitCallbackType func, void* param);
};
}
+#endif
diff --git a/media/webrtc/signaling/test/FakeLogging.h b/media/webrtc/signaling/test/FakeLogging.h
new file mode 100644
index 000000000..2620ddd01
--- /dev/null
+++ b/media/webrtc/signaling/test/FakeLogging.h
@@ -0,0 +1,26 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef FakeLogging_h
+#define FakeLogging_h
+
+namespace mozilla {
+namespace detail {
+// Minimal stand-in for mozilla::detail::log_print (cf. Logging.cpp:#48-53):
+// printf-style formats via NSPR and forwards the result to PR_LogPrint.
+// 'inline' is required because this definition lives in a header that may
+// be included by more than one test translation unit (ODR / duplicate
+// symbol at link time otherwise). aModule and aLevel are accepted for
+// signature compatibility but ignored.
+inline void log_print(const PRLogModuleInfo* aModule,
+               LogLevel aLevel,
+               const char* aFmt, ...)
+  {
+  va_list ap;
+  va_start(ap, aFmt);
+  char* buff = PR_vsmprintf(aFmt, ap);
+  if (buff) {  // PR_vsmprintf returns null on OOM; don't print/free null
+    PR_LogPrint("%s", buff);
+    PR_smprintf_free(buff);
+  }
+  va_end(ap);
+  }
+
+}
+}
+
+#endif
diff --git a/media/webrtc/signaling/test/FakeMediaStreams.h b/media/webrtc/signaling/test/FakeMediaStreams.h
new file mode 100644
index 000000000..117d26905
--- /dev/null
+++ b/media/webrtc/signaling/test/FakeMediaStreams.h
@@ -0,0 +1,656 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef FAKE_MEDIA_STREAM_H_
+#define FAKE_MEDIA_STREAM_H_
+
+#include <set>
+#include <string>
+#include <sstream>
+#include <vector>
+
+#include "nsNetCID.h"
+#include "nsITimer.h"
+#include "nsComponentManagerUtils.h"
+#include "nsIComponentManager.h"
+#include "nsIComponentRegistrar.h"
+#include "nsISupportsImpl.h"
+#include "nsServiceManagerUtils.h"
+
+// #includes from MediaStream.h
+#include "mozilla/Mutex.h"
+#include "AudioSegment.h"
+#include "MediaSegment.h"
+#include "StreamTracks.h"
+#include "VideoSegment.h"
+#include "nsTArray.h"
+#include "nsIRunnable.h"
+#include "nsISupportsImpl.h"
+
+class nsPIDOMWindowInner;
+
+namespace mozilla {
+ class MediaStreamGraphImpl;
+ class MediaSegment;
+ class PeerConnectionImpl;
+ class PeerConnectionMedia;
+ class RemoteSourceStreamInfo;
+};
+
+
+namespace mozilla {
+
+class MediaStreamGraph;
+
+static MediaStreamGraph* gGraph;
+
+// Minimal stand-in for dom::AudioChannel; tests only need a default value.
+struct AudioChannel {
+  enum {
+    Normal
+  };
+};
+
+enum MediaStreamGraphEvent : uint32_t;
+enum TrackEventCommand : uint32_t;
+
+// Minimal MediaStreamGraph stand-in: a lazily-created process-wide
+// singleton. Both GetInstance() arguments are accepted only for API
+// compatibility and are ignored; the instance is intentionally leaked
+// for the lifetime of the test process.
+class MediaStreamGraph {
+public:
+  // Keep this in sync with the enum in MediaStreamGraph.h
+  enum GraphDriverType {
+    AUDIO_THREAD_DRIVER,
+    SYSTEM_THREAD_DRIVER,
+    OFFLINE_THREAD_DRIVER
+  };
+  static MediaStreamGraph* GetInstance(GraphDriverType aDriverType,
+                                       uint32_t aType) {
+    if (gGraph) {
+      return gGraph;
+    }
+    gGraph = new MediaStreamGraph();
+    return gGraph;
+  }
+};
+}
+
+// Consumer interface for video segments pushed into a fake source stream;
+// Fake_SourceMediaStream::AppendToTrack forwards each video segment here.
+class Fake_VideoSink {
+public:
+  Fake_VideoSink() {}
+  virtual void SegmentReady(mozilla::MediaSegment* aSegment) = 0;
+  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(Fake_VideoSink)
+protected:
+  virtual ~Fake_VideoSink() {}
+};
+
+class Fake_MediaStream;
+class Fake_SourceMediaStream;
+
+// Stream-level listener test double. Only NotifyPull() is mandatory; the
+// queued-change callbacks default to no-ops so tests override just what
+// they observe.
+class Fake_MediaStreamListener
+{
+protected:
+  virtual ~Fake_MediaStreamListener() {}
+
+public:
+  virtual void NotifyQueuedTrackChanges(mozilla::MediaStreamGraph* aGraph, mozilla::TrackID aID,
+                                        mozilla::StreamTime aTrackOffset,
+                                        mozilla::TrackEventCommand aTrackEvents,
+                                        const mozilla::MediaSegment& aQueuedMedia,
+                                        Fake_MediaStream* aInputStream,
+                                        mozilla::TrackID aInputTrackID) {}
+  // Called by the fake graph/timer machinery to request more data.
+  virtual void NotifyPull(mozilla::MediaStreamGraph* aGraph, mozilla::StreamTime aDesiredTime) = 0;
+  virtual void NotifyQueuedAudioData(mozilla::MediaStreamGraph* aGraph, mozilla::TrackID aID,
+                                     mozilla::StreamTime aTrackOffset,
+                                     const mozilla::AudioSegment& aQueuedMedia,
+                                     Fake_MediaStream* aInputStream,
+                                     mozilla::TrackID aInputTrackID) {}
+
+  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(Fake_MediaStreamListener)
+};
+
+// Direct (unbuffered) variant of the stream listener: implementations
+// receive media as it is appended rather than after graph processing.
+class Fake_DirectMediaStreamListener : public Fake_MediaStreamListener
+{
+protected:
+  virtual ~Fake_DirectMediaStreamListener() {}
+
+public:
+  virtual void NotifyRealtimeData(mozilla::MediaStreamGraph* graph, mozilla::TrackID tid,
+                                  mozilla::StreamTime offset,
+                                  const mozilla::MediaSegment& media) = 0;
+};
+
+// Per-track listener test double; registered via
+// Fake_MediaStream::AddTrackListener with a specific TrackID.
+class Fake_MediaStreamTrackListener
+{
+  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(Fake_MediaStreamTrackListener)
+
+protected:
+  virtual ~Fake_MediaStreamTrackListener() {}
+
+public:
+  virtual void NotifyQueuedChanges(mozilla::MediaStreamGraph* aGraph,
+                                   mozilla::StreamTime aTrackOffset,
+                                   const mozilla::MediaSegment& aQueuedMedia) = 0;
+};
+
+// Direct per-track listener; also receives install/uninstall notifications
+// (the fake track reports STREAM_NOT_SUPPORTED on install — see
+// Fake_MediaStreamTrack::AddDirectListener).
+class Fake_DirectMediaStreamTrackListener : public Fake_MediaStreamTrackListener
+{
+protected:
+  virtual ~Fake_DirectMediaStreamTrackListener() {}
+
+public:
+  virtual void NotifyRealtimeTrackData(mozilla::MediaStreamGraph* aGraph,
+                                       mozilla::StreamTime aTrackOffset,
+                                       const mozilla::MediaSegment& aMedia) = 0;
+  enum class InstallationResult {
+    STREAM_NOT_SUPPORTED,
+    SUCCESS
+  };
+  virtual void NotifyDirectListenerInstalled(InstallationResult aResult) = 0;
+  virtual void NotifyDirectListenerUninstalled() = 0;
+};
+
+// Stubbed MediaStreamVideoSink replacement: satisfies the whole direct
+// track-listener interface with no-ops and exposes the two frame hooks
+// (SetCurrentFrames / ClearFrames) for tests to override.
+class Fake_MediaStreamVideoSink : public Fake_DirectMediaStreamTrackListener{
+public:
+  Fake_MediaStreamVideoSink() {}
+
+  void NotifyQueuedChanges(mozilla::MediaStreamGraph* aGraph,
+                           mozilla::StreamTime aTrackOffset,
+                           const mozilla::MediaSegment& aQueuedMedia) override {}
+
+  void NotifyRealtimeTrackData(mozilla::MediaStreamGraph* aGraph,
+                               mozilla::StreamTime aTrackOffset,
+                               const mozilla::MediaSegment& aMedia) override {}
+  void NotifyDirectListenerInstalled(InstallationResult aResult) override {}
+  void NotifyDirectListenerUninstalled() override {}
+
+  virtual void SetCurrentFrames(const mozilla::VideoSegment& aSegment) {};
+  virtual void ClearFrames() {};
+
+protected:
+  virtual ~Fake_MediaStreamVideoSink() {}
+};
+
+// Note: only one listener supported
+// Base test double for MediaStream. Keeps stream- and track-listener
+// registries behind mMutex and fans NotifyPull out to stream listeners.
+class Fake_MediaStream {
+ protected:
+  // NOTE(review): Stop() is virtual, but calling it from this base
+  // destructor dispatches to Fake_MediaStream::Stop() (a no-op) once the
+  // derived part has been destroyed — derived streams with timers rely on
+  // being stopped explicitly before destruction. TODO confirm callers.
+  virtual ~Fake_MediaStream() { Stop(); }
+
+  // Pairs a track listener with the TrackID it registered for.
+  struct BoundTrackListener
+  {
+    BoundTrackListener(Fake_MediaStreamTrackListener* aListener,
+                       mozilla::TrackID aTrackID)
+      : mListener(aListener), mTrackID(aTrackID) {}
+    RefPtr<Fake_MediaStreamTrackListener> mListener;
+    mozilla::TrackID mTrackID;
+  };
+
+ public:
+  Fake_MediaStream () : mListeners(), mTrackListeners(), mMutex("Fake MediaStream") {}
+
+  // Fixed fake graph rate in Hz (matches GRAPH_RATE in the impl header).
+  static uint32_t GraphRate() { return 16000; }
+
+  void AddListener(Fake_MediaStreamListener *aListener) {
+    mozilla::MutexAutoLock lock(mMutex);
+    mListeners.insert(aListener);
+  }
+
+  void RemoveListener(Fake_MediaStreamListener *aListener) {
+    mozilla::MutexAutoLock lock(mMutex);
+    mListeners.erase(aListener);
+  }
+
+  void AddTrackListener(Fake_MediaStreamTrackListener *aListener,
+                        mozilla::TrackID aTrackID) {
+    mozilla::MutexAutoLock lock(mMutex);
+    mTrackListeners.push_back(BoundTrackListener(aListener, aTrackID));
+  }
+
+  // Removes only the first (listener, track) pair that matches.
+  void RemoveTrackListener(Fake_MediaStreamTrackListener *aListener,
+                           mozilla::TrackID aTrackID) {
+    mozilla::MutexAutoLock lock(mMutex);
+    for (std::vector<BoundTrackListener>::iterator it = mTrackListeners.begin();
+         it != mTrackListeners.end(); ++it) {
+      if (it->mListener == aListener && it->mTrackID == aTrackID) {
+        mTrackListeners.erase(it);
+        return;
+      }
+    }
+  }
+
+  // Forwards the pull request to every registered stream listener.
+  void NotifyPull(mozilla::MediaStreamGraph* graph,
+                  mozilla::StreamTime aDesiredTime) {
+
+    mozilla::MutexAutoLock lock(mMutex);
+    std::set<RefPtr<Fake_MediaStreamListener>>::iterator it;
+    for (it = mListeners.begin(); it != mListeners.end(); ++it) {
+      (*it)->NotifyPull(graph, aDesiredTime);
+    }
+  }
+
+  virtual Fake_SourceMediaStream *AsSourceStream() { return nullptr; }
+
+  virtual mozilla::MediaStreamGraphImpl *GraphImpl() { return nullptr; }
+  virtual nsresult Start() { return NS_OK; }
+  virtual nsresult Stop() { return NS_OK; }
+  virtual void StopStream() {}
+
+  virtual void Periodic() {}
+
+  // Time/ticks conversions; defined in FakeMediaStreamsImpl.h.
+  double StreamTimeToSeconds(mozilla::StreamTime aTime);
+  mozilla::StreamTime
+  TicksToTimeRoundDown(mozilla::TrackRate aRate, mozilla::TrackTicks aTicks);
+  mozilla::TrackTicks TimeToTicksRoundUp(mozilla::TrackRate aRate,
+                                         mozilla::StreamTime aTime);
+
+  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(Fake_MediaStream);
+
+ protected:
+  std::set<RefPtr<Fake_MediaStreamListener>> mListeners;
+  std::vector<BoundTrackListener> mTrackListeners;
+  mozilla::Mutex mMutex;  // Lock to prevent the listener list from being modified while
+                          // executing Periodic().
+
+// nsITimerCallback adapter: every timer tick calls mStream->Periodic()
+// and counts invocations. Detach() severs the raw back-pointer so the
+// timer can outlive (or race the teardown of) the stream safely.
+class Fake_MediaPeriodic : public nsITimerCallback {
+public:
+  explicit Fake_MediaPeriodic(Fake_MediaStream *aStream) : mStream(aStream),
+                                                           mCount(0) {}
+  void Detach() {
+    mStream = nullptr;
+  }
+
+  int GetTimesCalled() { return mCount; }
+
+  NS_DECL_THREADSAFE_ISUPPORTS
+  NS_DECL_NSITIMERCALLBACK
+
+protected:
+  virtual ~Fake_MediaPeriodic() {}
+
+  // Raw, non-owning back-pointer; cleared via Detach().
+  Fake_MediaStream *mStream;
+  int mCount;
+};
+
+
+// Source-stream test double. Tracks how many "interesting" segments were
+// appended (audio counts only if it contains a non-zero sample; video
+// always counts and is forwarded to the optional Fake_VideoSink), and
+// drives registered listeners from a 100 ms timer (see the impl header).
+class Fake_SourceMediaStream : public Fake_MediaStream {
+ public:
+  Fake_SourceMediaStream() : mSegmentsAdded(0),
+                             mDesiredTime(0),
+                             mPullEnabled(false),
+                             mStop(false),
+                             mPeriodic(new Fake_MediaPeriodic(this)) {}
+
+  enum {
+    ADDTRACK_QUEUED    = 0x01 // Queue track add until FinishAddTracks()
+  };
+
+  void AddVideoSink(const RefPtr<Fake_VideoSink>& aSink) {
+    mSink  = aSink;
+  }
+
+  // Track management is stubbed out; segments are owned (and discarded).
+  void AddTrack(mozilla::TrackID aID, mozilla::StreamTime aStart,
+                mozilla::MediaSegment* aSegment, uint32_t aFlags = 0) {
+    delete aSegment;
+  }
+  void AddAudioTrack(mozilla::TrackID aID, mozilla::TrackRate aRate, mozilla::StreamTime aStart,
+                     mozilla::AudioSegment* aSegment, uint32_t aFlags = 0) {
+    delete aSegment;
+  }
+  void FinishAddTracks() {}
+  void EndTrack(mozilla::TrackID aID) {}
+
+  bool AppendToTrack(mozilla::TrackID aID, mozilla::MediaSegment* aSegment,
+                     mozilla::MediaSegment *aRawSegment) {
+    return AppendToTrack(aID, aSegment);
+  }
+
+  bool AppendToTrack(mozilla::TrackID aID, mozilla::MediaSegment* aSegment) {
+    bool nonZeroSample = false;
+    MOZ_ASSERT(aSegment);
+    if(aSegment->GetType() == mozilla::MediaSegment::AUDIO) {
+      //On audio segment append, we verify for validity
+      //of the audio samples.
+      mozilla::AudioSegment* audio =
+              static_cast<mozilla::AudioSegment*>(aSegment);
+      mozilla::AudioSegment::ChunkIterator iter(*audio);
+      while(!iter.IsEnded()) {
+        mozilla::AudioChunk& chunk = *(iter);
+        MOZ_ASSERT(chunk.mBuffer);
+        // Only channel 0 is inspected — enough for the test's purposes.
+        const int16_t* buf =
+          static_cast<const int16_t*>(chunk.mChannelData[0]);
+        for(int i=0; i<chunk.mDuration; i++) {
+          if(buf[i]) {
+            //at least one non-zero sample found.
+            nonZeroSample = true;
+            break;
+          }
+        }
+        //process next chunk
+        iter.Next();
+      }
+      if(nonZeroSample) {
+          //we increment segments count if
+          //at least one non-zero sample was found.
+          ++mSegmentsAdded;
+      }
+    } else {
+      //in the case of video segment appended, we just increase the
+      //segment count.
+      if (mSink.get()) {
+        mSink->SegmentReady(aSegment);
+      }
+      ++mSegmentsAdded;
+    }
+    return true;
+  }
+
+  void AdvanceKnownTracksTime(mozilla::StreamTime aKnownTime) {}
+
+  void SetPullEnabled(bool aEnabled) {
+    mPullEnabled = aEnabled;
+  }
+  void AddDirectListener(Fake_MediaStreamListener* aListener) {}
+  void RemoveDirectListener(Fake_MediaStreamListener* aListener) {}
+
+  //Don't pull anymore data,if mStop is true.
+  virtual void StopStream() {
+    mStop = true;
+  }
+
+  virtual Fake_SourceMediaStream *AsSourceStream() { return this; }
+
+  // Defined in FakeMediaStreamsImpl.h (timer setup/teardown).
+  virtual nsresult Start();
+  virtual nsresult Stop();
+
+  virtual void Periodic();
+
+  virtual int GetSegmentsAdded() {
+    return mSegmentsAdded;
+  }
+
+ protected:
+  int mSegmentsAdded;
+  uint64_t mDesiredTime;
+  bool mPullEnabled;
+  bool mStop;
+  RefPtr<Fake_MediaPeriodic> mPeriodic;
+  RefPtr<Fake_VideoSink> mSink;
+  nsCOMPtr<nsITimer> mTimer;
+};
+
+class Fake_DOMMediaStream;
+
+// Empty ref-counted stand-in for dom::MediaStreamTrackSource; tests only
+// need it to exist as a type.
+class Fake_MediaStreamTrackSource
+{
+  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(Fake_MediaStreamTrackSource)
+
+protected:
+  virtual ~Fake_MediaStreamTrackSource() {}
+};
+
+// Track test double. Each track is either audio or video (fixed at
+// construction; video gets TrackID 1, audio TrackID 0) and is assigned a
+// process-unique string id from a static counter.
+class Fake_MediaStreamTrack
+{
+  friend class mozilla::PeerConnectionImpl;
+  friend class mozilla::PeerConnectionMedia;
+  friend class mozilla::RemoteSourceStreamInfo;
+public:
+  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(Fake_MediaStreamTrack)
+
+  Fake_MediaStreamTrack(bool aIsVideo, Fake_DOMMediaStream* aOwningStream) :
+    mIsVideo (aIsVideo),
+    mOwningStream (aOwningStream),
+    mTrackID(mIsVideo ? 1 : 0)
+  {
+    static size_t counter = 0;
+    std::ostringstream os;
+    os << counter++;
+    mID = os.str();
+  }
+
+  std::string GetId() const { return mID; }
+  void AssignId(const std::string& id) { mID = id; }
+  mozilla::MediaStreamGraphImpl* GraphImpl() { return nullptr; }
+  // Self-casts: return this when the kind matches, else null.
+  const Fake_MediaStreamTrack* AsVideoStreamTrack() const
+  {
+    return mIsVideo? this : nullptr;
+  }
+  const Fake_MediaStreamTrack* AsAudioStreamTrack() const
+  {
+    return mIsVideo? nullptr : this;
+  }
+  uint32_t typeSize () const
+  {
+    return sizeof(Fake_MediaStreamTrack);
+  }
+  const char* typeName () const
+  {
+    return "Fake_MediaStreamTrack";
+  }
+  // Listener registration is delegated to the owning stream, keyed by
+  // this track's TrackID (definitions in FakeMediaStreamsImpl.h).
+  void AddListener(Fake_MediaStreamTrackListener *aListener);
+  void RemoveListener(Fake_MediaStreamTrackListener *aListener);
+  // Direct listeners are registered like normal ones, but are told that
+  // direct data is not supported by this fake.
+  void AddDirectListener(Fake_DirectMediaStreamTrackListener *aListener)
+  {
+    AddListener(aListener);
+    aListener->NotifyDirectListenerInstalled(
+      Fake_DirectMediaStreamTrackListener::InstallationResult::STREAM_NOT_SUPPORTED);
+  }
+  void RemoveDirectListener(Fake_DirectMediaStreamTrackListener *aListener)
+  {
+    RemoveListener(aListener);
+  }
+
+  class PrincipalChangeObserver
+  {
+  public:
+    virtual void PrincipalChanged(Fake_MediaStreamTrack* aMediaStreamTrack) = 0;
+  };
+  // Principal-change observation is not modeled; both calls are no-ops.
+  void AddPrincipalChangeObserver(void* ignoredObserver) {}
+  void RemovePrincipalChangeObserver(void* ignoredObserver) {}
+
+private:
+  ~Fake_MediaStreamTrack() {}
+
+  const bool mIsVideo;
+  // Raw, non-owning back-pointer to the stream that created this track.
+  Fake_DOMMediaStream* mOwningStream;
+  mozilla::TrackID mTrackID;
+  std::string mID;
+};
+
+// Test double for dom::DOMMediaStream: owns one fake input stream plus a
+// pre-made audio track and video track. Which tracks are "present" is
+// governed by the HINT_CONTENTS_* bits, not by real track creation.
+class Fake_DOMMediaStream : public nsISupports
+{
+  friend class mozilla::PeerConnectionMedia;
+protected:
+  virtual ~Fake_DOMMediaStream() {
+    // Note: memory leak
+    mMediaStream->Stop();
+  }
+
+public:
+  explicit Fake_DOMMediaStream(Fake_MediaStream *stream = nullptr)
+    : mMediaStream(stream ? stream : new Fake_MediaStream())
+    , mHintContents(0) // was uninitialized: GetTrackById()/HasTrack() read
+                       // it even when SetHintContents() was never called
+    , mVideoTrack(new Fake_MediaStreamTrack(true, this))
+    , mAudioTrack(new Fake_MediaStreamTrack(false, this))
+  {
+    // Process-unique string id, like Fake_MediaStreamTrack's.
+    static size_t counter = 0;
+    std::ostringstream os;
+    os << counter++;
+    mID = os.str();
+  }
+
+  NS_DECL_THREADSAFE_ISUPPORTS
+
+  // Factory matching the real DOMMediaStream API; aWindow/aGraph ignored.
+  static already_AddRefed<Fake_DOMMediaStream>
+  CreateSourceStreamAsInput(nsPIDOMWindowInner* aWindow,
+                            mozilla::MediaStreamGraph* aGraph,
+                            uint32_t aHintContents = 0) {
+    Fake_SourceMediaStream *source = new Fake_SourceMediaStream();
+
+    RefPtr<Fake_DOMMediaStream> ds = new Fake_DOMMediaStream(source);
+    ds->SetHintContents(aHintContents);
+
+    return ds.forget();
+  }
+
+  virtual void Stop() {} // Really DOMLocalMediaStream
+
+  virtual bool AddDirectListener(Fake_MediaStreamListener *aListener) { return false; }
+  virtual void RemoveDirectListener(Fake_MediaStreamListener *aListener) {}
+
+  // All stream accessors collapse onto the single owned fake stream.
+  Fake_MediaStream *GetInputStream() { return mMediaStream; }
+  Fake_MediaStream *GetOwnedStream() { return mMediaStream; }
+  Fake_MediaStream *GetPlaybackStream() { return mMediaStream; }
+  Fake_MediaStream *GetStream() { return mMediaStream; }
+  std::string GetId() const { return mID; }
+  void AssignId(const std::string& id) { mID = id; }
+  // Returns the matching track only if the corresponding hint bit is set.
+  Fake_MediaStreamTrack* GetTrackById(const std::string& aId)
+  {
+    if (mHintContents & HINT_CONTENTS_AUDIO) {
+      if (mAudioTrack && mAudioTrack->GetId() == aId) {
+        return mAudioTrack;
+      }
+    }
+    if (mHintContents & HINT_CONTENTS_VIDEO) {
+      if (mVideoTrack && mVideoTrack->GetId() == aId) {
+        return mVideoTrack;
+      }
+    }
+    return nullptr;
+  }
+  Fake_MediaStreamTrack* GetOwnedTrackById(const std::string& aId)
+  {
+    return GetTrackById(aId);
+  }
+
+  // Hints to tell the SDP generator about whether this
+  // MediaStream probably has audio and/or video
+  typedef uint8_t TrackTypeHints;
+  enum {
+    HINT_CONTENTS_AUDIO = 0x01,
+    HINT_CONTENTS_VIDEO = 0x02
+  };
+  uint32_t GetHintContents() const { return mHintContents; }
+  void SetHintContents(uint32_t aHintContents) { mHintContents = aHintContents; }
+
+  void
+  GetTracks(nsTArray<RefPtr<Fake_MediaStreamTrack> >& aTracks)
+  {
+    GetAudioTracks(aTracks);
+    GetVideoTracks(aTracks);
+  }
+
+  void GetAudioTracks(nsTArray<RefPtr<Fake_MediaStreamTrack> >& aTracks)
+  {
+    if (mHintContents & HINT_CONTENTS_AUDIO) {
+      aTracks.AppendElement(mAudioTrack);
+    }
+  }
+
+  void
+  GetVideoTracks(nsTArray<RefPtr<Fake_MediaStreamTrack> >& aTracks)
+  {
+    if (mHintContents & HINT_CONTENTS_VIDEO) {
+      aTracks.AppendElement(mVideoTrack);
+    }
+  }
+
+  // Membership is decided by kind + hint bit, not object identity.
+  bool
+  HasTrack(const Fake_MediaStreamTrack& aTrack) const
+  {
+    return ((mHintContents & HINT_CONTENTS_AUDIO) && aTrack.AsAudioStreamTrack()) ||
+           ((mHintContents & HINT_CONTENTS_VIDEO) && aTrack.AsVideoStreamTrack());
+  }
+
+  bool
+  OwnsTrack(const Fake_MediaStreamTrack& aTrack) const
+  {
+    return HasTrack(aTrack);
+  }
+
+  void SetTrackEnabled(mozilla::TrackID aTrackID, bool aEnabled) {}
+
+  void AddTrackInternal(Fake_MediaStreamTrack* aTrack) {}
+
+  // Hands back the pre-made track of the requested kind instead of
+  // creating one; aTrackID and aSource are ignored.
+  already_AddRefed<Fake_MediaStreamTrack>
+  CreateDOMTrack(mozilla::TrackID aTrackID, mozilla::MediaSegment::Type aType,
+                 Fake_MediaStreamTrackSource* aSource)
+  {
+    switch(aType) {
+      case mozilla::MediaSegment::AUDIO: {
+        return do_AddRef(mAudioTrack);
+      }
+      case mozilla::MediaSegment::VIDEO: {
+        return do_AddRef(mVideoTrack);
+      }
+      default: {
+        MOZ_CRASH("Unknown media type");
+      }
+    }
+  }
+
+private:
+  RefPtr<Fake_MediaStream> mMediaStream;
+
+  // tells the SDP generator about whether this
+  // MediaStream probably has audio and/or video
+  uint32_t mHintContents;
+  RefPtr<Fake_MediaStreamTrack> mVideoTrack;
+  RefPtr<Fake_MediaStreamTrack> mAudioTrack;
+
+  std::string mID;
+};
+
+// Timer-driven base for the audio/video fake sources. Unlike
+// Fake_SourceMediaStream, "segments added" here just counts how many
+// times the periodic callback fired.
+class Fake_MediaStreamBase : public Fake_MediaStream {
+ public:
+  Fake_MediaStreamBase() : mPeriodic(new Fake_MediaPeriodic(this)) {}
+
+  // Defined in FakeMediaStreamsImpl.h; near-duplicates of
+  // Fake_SourceMediaStream::Start/Stop.
+  virtual nsresult Start();
+  virtual nsresult Stop();
+
+  virtual int GetSegmentsAdded() {
+    return mPeriodic->GetTimesCalled();
+  }
+
+ private:
+  nsCOMPtr<nsITimer> mTimer;
+  RefPtr<Fake_MediaPeriodic> mPeriodic;
+};
+
+
+// Synthetic audio source: Periodic() (impl header) pushes a saw-tooth
+// wave to listeners until StopStream() is called.
+class Fake_AudioStreamSource : public Fake_MediaStreamBase {
+ public:
+  Fake_AudioStreamSource() : Fake_MediaStreamBase(),
+                             mCount(0),
+                             mStop(false) {}
+  //Signaling Agent indicates us to stop generating
+  //further audio.
+  void StopStream() {
+    mStop = true;
+  }
+  virtual void Periodic();
+  int mCount;  // running sample counter; drives the saw-tooth phase
+  bool mStop;
+};
+
+// Video source stub: inherits the timer machinery but generates no frames.
+class Fake_VideoStreamSource : public Fake_MediaStreamBase {
+ public:
+  Fake_VideoStreamSource() : Fake_MediaStreamBase() {}
+};
+
+
+// Alias the Fake_* doubles into the mozilla/dom namespaces so production
+// signaling code compiles against them unchanged (USE_FAKE_MEDIA_STREAMS).
+namespace mozilla {
+typedef Fake_MediaStream MediaStream;
+typedef Fake_SourceMediaStream SourceMediaStream;
+typedef Fake_MediaStreamListener MediaStreamListener;
+typedef Fake_DirectMediaStreamListener DirectMediaStreamListener;
+typedef Fake_MediaStreamTrackListener MediaStreamTrackListener;
+typedef Fake_DirectMediaStreamTrackListener DirectMediaStreamTrackListener;
+typedef Fake_DOMMediaStream DOMMediaStream;
+typedef Fake_DOMMediaStream DOMLocalMediaStream;
+typedef Fake_MediaStreamVideoSink MediaStreamVideoSink;
+
+namespace dom {
+typedef Fake_MediaStreamTrack MediaStreamTrack;
+typedef Fake_MediaStreamTrackSource MediaStreamTrackSource;
+}
+}
+
+#endif
diff --git a/media/webrtc/signaling/test/FakeMediaStreamsImpl.h b/media/webrtc/signaling/test/FakeMediaStreamsImpl.h
new file mode 100644
index 000000000..da0e81416
--- /dev/null
+++ b/media/webrtc/signaling/test/FakeMediaStreamsImpl.h
@@ -0,0 +1,236 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef FAKE_MEDIA_STREAMIMPL_H_
+#define FAKE_MEDIA_STREAMIMPL_H_
+
+#include "FakeMediaStreams.h"
+
+#include "nspr.h"
+#include "nsError.h"
+
+void LogTime(AsyncLatencyLogger::LatencyLogIndex index, uint64_t b, int64_t c) {}
+void LogLatency(AsyncLatencyLogger::LatencyLogIndex index, uint64_t b, int64_t c) {}
+
+static const int AUDIO_BUFFER_SIZE = 1600;
+static const int NUM_CHANNELS = 2;
+static const int GRAPH_RATE = 16000;
+
+NS_IMPL_ISUPPORTS0(Fake_DOMMediaStream)
+
+// Fake_MediaStream
+// Convert a StreamTime (expressed in GRAPH_RATE ticks) to seconds.
+double Fake_MediaStream::StreamTimeToSeconds(mozilla::StreamTime aTime) {
+  return static_cast<double>(aTime)/GRAPH_RATE;
+}
+
+// Convert a stream time (GRAPH_RATE units) to track ticks at aRate,
+// rounding UP as the name promises. The previous body truncated, so a
+// time mapping to a fractional tick under-reported by one tick.
+mozilla::TrackTicks Fake_MediaStream::TimeToTicksRoundUp(mozilla::TrackRate aRate,
+                                                         mozilla::StreamTime aTime) {
+  return (aTime * aRate + GRAPH_RATE - 1) / GRAPH_RATE;
+}
+
+// Convert aTicks at aRate to StreamTime (GRAPH_RATE units); integer
+// division truncates, i.e. rounds down as the name says.
+mozilla::StreamTime
+Fake_MediaStream::TicksToTimeRoundDown(mozilla::TrackRate aRate,
+                                       mozilla::TrackTicks aTicks) {
+  return aTicks * GRAPH_RATE / aRate;
+}
+
+// Fake_SourceMediaStream
+// Create and arm the repeating timer that drives Periodic().
+nsresult Fake_SourceMediaStream::Start() {
+  mTimer = do_CreateInstance(NS_TIMER_CONTRACTID);
+  if (!mTimer) {
+    return NS_ERROR_FAILURE;
+  }
+
+  // 100 ms interval; Periodic() advances the fake clock by the same step.
+  mTimer->InitWithCallback(mPeriodic, 100, nsITimer::TYPE_REPEATING_SLACK);
+
+  return NS_OK;
+}
+
+// Cancel the timer (if Start() ever ran) and detach the periodic
+// callback. Holding mMutex guarantees Periodic() is not mid-run when
+// this returns, so teardown is safe.
+nsresult Fake_SourceMediaStream::Stop() {
+  mozilla::MutexAutoLock lock(mMutex);
+  if (mTimer)
+    mTimer->Cancel();
+  mPeriodic->Detach();
+  return NS_OK;
+}
+
+// Timer callback: while pulling is enabled and not stopped, advance the
+// fake clock by 100 ms and ask every stream listener for more data.
+void Fake_SourceMediaStream::Periodic() {
+  mozilla::MutexAutoLock lock(mMutex);
+  // Pull more audio-samples iff pulling is enabled
+  // and we are not asked by the signaling agent to stop
+  //pulling data.
+  if (mPullEnabled && !mStop) {
+    // 100 ms matches timer interval and AUDIO_BUFFER_SIZE @ 16000 Hz
+    mDesiredTime += 100;
+    for (std::set<RefPtr<Fake_MediaStreamListener>>::iterator it =
+             mListeners.begin(); it != mListeners.end(); ++it) {
+      // mDesiredTime is in ms, so convert with a 1000 Hz "rate".
+      (*it)->NotifyPull(nullptr, TicksToTimeRoundDown(1000 /* ms per s */,
+                                                      mDesiredTime));
+    }
+  }
+}
+
+// Fake_MediaStreamTrack
+// Registration is delegated to the owning stream, keyed by this track's
+// TrackID.
+void Fake_MediaStreamTrack::AddListener(Fake_MediaStreamTrackListener *aListener)
+{
+  mOwningStream->GetInputStream()->AddTrackListener(aListener, mTrackID);
+}
+// Mirror of AddListener: unregister from the owning stream.
+void Fake_MediaStreamTrack::RemoveListener(Fake_MediaStreamTrackListener *aListener)
+{
+  mOwningStream->GetInputStream()->RemoveTrackListener(aListener, mTrackID);
+}
+
+// Fake_MediaStreamBase
+// Create and arm the 100 ms repeating timer driving Periodic().
+// NOTE(review): duplicated from Fake_SourceMediaStream::Start() — the two
+// classes keep separate mTimer members.
+nsresult Fake_MediaStreamBase::Start() {
+  mTimer = do_CreateInstance(NS_TIMER_CONTRACTID);
+  if (!mTimer) {
+    return NS_ERROR_FAILURE;
+  }
+
+  mTimer->InitWithCallback(mPeriodic, 100, nsITimer::TYPE_REPEATING_SLACK);
+
+  return NS_OK;
+}
+
+// Cancel the periodic timer.
+nsresult Fake_MediaStreamBase::Stop() {
+  // Lock the mutex so that we know that after this
+  // has returned, periodic will not be firing again
+  // and so it's safe to destruct.
+  mozilla::MutexAutoLock lock(mMutex);
+  // Start() may never have run (or failed before creating the timer):
+  // guard against a null mTimer, matching the equivalent check in
+  // Fake_SourceMediaStream::Stop().
+  if (mTimer)
+    mTimer->Cancel();
+
+  return NS_OK;
+}
+
+// Fake_AudioStreamSource
+// Timer tick: generate one buffer of synthetic 16-bit saw-tooth audio and
+// fan it out to every stream- and track-listener.
+void Fake_AudioStreamSource::Periodic() {
+  mozilla::MutexAutoLock lock(mMutex);
+  //Are we asked to stop pumping audio samples ?
+  if(mStop) {
+    return;
+  }
+  //Generate Signed 16 Bit Audio samples
+  RefPtr<mozilla::SharedBuffer> samples =
+    mozilla::SharedBuffer::Create(AUDIO_BUFFER_SIZE * NUM_CHANNELS * sizeof(int16_t));
+  int16_t* data = reinterpret_cast<int16_t *>(samples->Data());
+  // Fill the whole buffer; was a magic "1600*2", now the same named
+  // constants the buffer was sized with above.
+  for(int i=0; i<(AUDIO_BUFFER_SIZE * NUM_CHANNELS); i++) {
+    //saw tooth audio sample
+    data[i] = ((mCount % 8) * 4000) - (7*4000)/2;
+    mCount++;
+  }
+
+  mozilla::AudioSegment segment;
+  AutoTArray<const int16_t *,1> channels;
+  channels.AppendElement(data);
+  segment.AppendFrames(samples.forget(),
+                       channels,
+                       AUDIO_BUFFER_SIZE,
+                       PRINCIPAL_HANDLE_NONE);
+
+  for(std::set<RefPtr<Fake_MediaStreamListener>>::iterator it = mListeners.begin();
+       it != mListeners.end(); ++it) {
+    (*it)->NotifyQueuedTrackChanges(nullptr, // Graph
+                                    0, // TrackID
+                                    0, // Offset TODO(ekr@rtfm.com) fix
+                                    static_cast<mozilla::TrackEventCommand>(0), // ???
+                                    segment,
+                                    nullptr, // Input stream
+                                    -1); // Input track id
+  }
+  for(std::vector<BoundTrackListener>::iterator it = mTrackListeners.begin();
+       it != mTrackListeners.end(); ++it) {
+    it->mListener->NotifyQueuedChanges(nullptr, // Graph
+                                       0, // Offset TODO(ekr@rtfm.com) fix
+                                       segment);
+  }
+}
+
+
+// Fake_MediaPeriodic
+NS_IMPL_ISUPPORTS(Fake_MediaPeriodic, nsITimerCallback)
+
+// Timer callback: forward to the stream (unless Detach()ed) and count
+// every invocation regardless.
+NS_IMETHODIMP
+Fake_MediaPeriodic::Notify(nsITimer *timer) {
+  if (mStream)
+    mStream->Periodic();
+  ++mCount;
+  return NS_OK;
+}
+
+
+#if 0
+#define WIDTH 320
+#define HEIGHT 240
+#define RATE USECS_PER_S
+#define USECS_PER_S 1000000
+#define FPS 10
+
+NS_IMETHODIMP
+Fake_VideoStreamSource::Notify(nsITimer* aTimer)
+{
+#if 0
+ mozilla::layers::BufferRecycleBin bin;
+
+ RefPtr<mozilla::layers::PlanarYCbCrImage> image = new
+ mozilla::layers::PlanarYCbCrImage(&bin);
+
+ const uint8_t lumaBpp = 8;
+ const uint8_t chromaBpp = 4;
+
+ int len = ((WIDTH * HEIGHT) * 3 / 2);
+ uint8_t* frame = (uint8_t*) PR_Malloc(len);
+ memset(frame, 0x80, len); // Gray
+
+ mozilla::layers::PlanarYCbCrData data;
+ data.mYChannel = frame;
+ data.mYSize = mozilla::gfx::IntSize(WIDTH, HEIGHT);
+ data.mYStride = WIDTH * lumaBpp / 8.0;
+ data.mCbCrStride = WIDTH * chromaBpp / 8.0;
+ data.mCbChannel = frame + HEIGHT * data.mYStride;
+ data.mCrChannel = data.mCbChannel + HEIGHT * data.mCbCrStride / 2;
+ data.mCbCrSize = mozilla::gfx::IntSize(WIDTH / 2, HEIGHT / 2);
+ data.mPicX = 0;
+ data.mPicY = 0;
+ data.mPicSize = mozilla::gfx::IntSize(WIDTH, HEIGHT);
+ data.mStereoMode = mozilla::layers::StereoMode::MONO;
+
+ mozilla::VideoSegment segment;
+ segment.AppendFrame(image.forget(), USECS_PER_S / FPS,
+ mozilla::gfx::IntSize(WIDTH, HEIGHT));
+
+ // TODO(ekr@rtfm.com): are we leaking?
+#endif
+
+ return NS_OK;
+}
+
+
+#if 0
+// Fake up buffer recycle bin
+mozilla::layers::BufferRecycleBin::BufferRecycleBin() :
+ mLock("mozilla.layers.BufferRecycleBin.mLock") {
+}
+
+void mozilla::layers::BufferRecycleBin::RecycleBuffer(uint8_t* buffer, uint32_t size) {
+ PR_Free(buffer);
+}
+
+uint8_t *mozilla::layers::BufferRecycleBin::GetBuffer(uint32_t size) {
+ return (uint8_t *)PR_MALLOC(size);
+}
+
+// YCbCrImage constructor (from ImageLayers.cpp)
+mozilla::layers::PlanarYCbCrImage::PlanarYCbCrImage(BufferRecycleBin *aRecycleBin)
+ : Image(nsnull, ImageFormat::PLANAR_YCBCR)
+ , mBufferSize(0)
+ , mRecycleBin(aRecycleBin)
+{
+}
+
+
+#endif
+#endif
+
+
+#endif
diff --git a/media/webrtc/signaling/test/FakePCObserver.h b/media/webrtc/signaling/test/FakePCObserver.h
new file mode 100644
index 000000000..460059b7f
--- /dev/null
+++ b/media/webrtc/signaling/test/FakePCObserver.h
@@ -0,0 +1,112 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef TEST_PCOBSERVER_H_
+#define TEST_PCOBSERVER_H_
+
+#include "nsNetCID.h"
+#include "nsITimer.h"
+#include "nsComponentManagerUtils.h"
+#include "nsIComponentManager.h"
+#include "nsIComponentRegistrar.h"
+
+#include "mozilla/Mutex.h"
+#include "AudioSegment.h"
+#include "MediaSegment.h"
+#include "StreamTracks.h"
+#include "nsTArray.h"
+#include "nsIRunnable.h"
+#include "nsISupportsImpl.h"
+#include "mozilla/dom/PeerConnectionObserverEnumsBinding.h"
+#include "PeerConnectionImpl.h"
+#include "nsWeakReference.h"
+
+namespace mozilla {
+class PeerConnectionImpl;
+}
+
+class nsIDOMWindow;
+class nsIDOMDataChannel;
+
+namespace test {
+
+// Abstract PeerConnection observer used by the signaling unit tests in
+// place of the real dom::PeerConnectionObserver. Subclasses implement
+// the NS_IMETHOD callbacks; the public data members record the last
+// observed state so tests can assert on it directly.
+class AFakePCObserver : public nsSupportsWeakReference
+{
+protected:
+  typedef mozilla::ErrorResult ER;
+public:
+  enum Action {
+    OFFER,
+    ANSWER
+  };
+
+  enum ResponseState {
+    stateNoResponse,
+    stateSuccess,
+    stateError
+  };
+
+  AFakePCObserver(mozilla::PeerConnectionImpl *peerConnection,
+                  const std::string &aName) :
+    state(stateNoResponse), addIceSuccessCount(0),
+    onAddStreamCalled(false),
+    name(aName),
+    pc(peerConnection) {
+  }
+
+  AFakePCObserver() :
+    state(stateNoResponse), addIceSuccessCount(0),
+    onAddStreamCalled(false),
+    name(""),
+    pc(nullptr) {
+  }
+
+  virtual ~AFakePCObserver() {}
+
+  std::vector<mozilla::DOMMediaStream *> GetStreams() { return streams; }
+
+  // Test-inspectable state, updated by subclass callback implementations.
+  ResponseState state;
+  std::string lastString;
+  mozilla::PeerConnectionImpl::Error lastStatusCode;
+  mozilla::dom::PCObserverStateType lastStateType;
+  int addIceSuccessCount;
+  bool onAddStreamCalled;
+  std::string name;
+  std::vector<std::string> candidates;
+
+  NS_IMETHOD OnCreateOfferSuccess(const char* offer, ER&) = 0;
+  NS_IMETHOD OnCreateOfferError(uint32_t code, const char *msg, ER&) = 0;
+  NS_IMETHOD OnCreateAnswerSuccess(const char* answer, ER&) = 0;
+  NS_IMETHOD OnCreateAnswerError(uint32_t code, const char *msg, ER&) = 0;
+  NS_IMETHOD OnSetLocalDescriptionSuccess(ER&) = 0;
+  NS_IMETHOD OnSetRemoteDescriptionSuccess(ER&) = 0;
+  NS_IMETHOD OnSetLocalDescriptionError(uint32_t code, const char *msg, ER&) = 0;
+  NS_IMETHOD OnSetRemoteDescriptionError(uint32_t code, const char *msg, ER&) = 0;
+  NS_IMETHOD NotifyDataChannel(nsIDOMDataChannel *channel, ER&) = 0;
+  NS_IMETHOD OnStateChange(mozilla::dom::PCObserverStateType state_type, ER&,
+                           void* = nullptr) = 0;
+  NS_IMETHOD OnAddStream(mozilla::DOMMediaStream &stream, ER&) = 0;
+  NS_IMETHOD OnRemoveStream(mozilla::DOMMediaStream &stream, ER&) = 0;
+  NS_IMETHOD OnAddTrack(mozilla::dom::MediaStreamTrack &track, ER&) = 0;
+  NS_IMETHOD OnRemoveTrack(mozilla::dom::MediaStreamTrack &track, ER&) = 0;
+  NS_IMETHOD OnReplaceTrackSuccess(ER&) = 0;
+  NS_IMETHOD OnReplaceTrackError(uint32_t code, const char *msg, ER&) = 0;
+  NS_IMETHOD OnAddIceCandidateSuccess(ER&) = 0;
+  NS_IMETHOD OnAddIceCandidateError(uint32_t code, const char *msg, ER&) = 0;
+  NS_IMETHOD OnIceCandidate(uint16_t level, const char *mid,
+                            const char *candidate, ER&) = 0;
+  NS_IMETHOD OnNegotiationNeeded(ER&) = 0;
+protected:
+  // Raw, non-owning pointer to the PC under test.
+  mozilla::PeerConnectionImpl *pc;
+  std::vector<mozilla::DOMMediaStream *> streams;
+};
+}
+
+namespace mozilla {
+namespace dom {
+typedef test::AFakePCObserver PeerConnectionObserver;
+}
+}
+
+#endif
diff --git a/media/webrtc/signaling/test/common.build b/media/webrtc/signaling/test/common.build
new file mode 100644
index 000000000..3e5450f5d
--- /dev/null
+++ b/media/webrtc/signaling/test/common.build
@@ -0,0 +1,134 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+if CONFIG['OS_TARGET'] in ('Darwin', 'Android'):
+ DEFINES['GTEST_USE_OWN_TR1_TUPLE'] = 1
+
+for var in ('MOZILLA_EXTERNAL_LINKAGE', 'USE_FAKE_MEDIA_STREAMS', 'USE_FAKE_PCOBSERVER',
+ 'NR_SOCKET_IS_VOID_PTR', 'HAVE_STRDUP'):
+ DEFINES[var] = True
+
+LOCAL_INCLUDES += [
+ '!/dist/include/mozilla/dom', # Binding headers (because binding
+ # implementations include them).
+ '!/dom/bindings', # Binding implementations (urk).
+ '/dom/media/',
+ '/ipc/chromium/src',
+ '/media/mtransport',
+ '/media/mtransport/test',
+ '/media/mtransport/third_party/nICEr/src/ice',
+ '/media/mtransport/third_party/nICEr/src/net',
+ '/media/mtransport/third_party/nICEr/src/stun',
+ '/media/mtransport/third_party/nrappkit/src/event',
+ '/media/mtransport/third_party/nrappkit/src/log',
+ '/media/mtransport/third_party/nrappkit/src/plugin',
+ '/media/mtransport/third_party/nrappkit/src/port/generic/include',
+ '/media/mtransport/third_party/nrappkit/src/registry',
+ '/media/mtransport/third_party/nrappkit/src/share',
+ '/media/mtransport/third_party/nrappkit/src/stats',
+ '/media/mtransport/third_party/nrappkit/src/util/libekr',
+ '/media/webrtc',
+ '/media/webrtc/signaling/src/common/browser_logging',
+ '/media/webrtc/signaling/src/common/time_profiling',
+ '/media/webrtc/signaling/src/media',
+ '/media/webrtc/signaling/src/media-conduit',
+ '/media/webrtc/signaling/src/mediapipeline',
+ '/media/webrtc/signaling/src/peerconnection',
+ '/media/webrtc/signaling/src/sdp/sipcc',
+ '/media/webrtc/trunk',
+ '/media/webrtc/trunk/testing/gtest/include',
+ '/xpcom/base',
+]
+
+if CONFIG['OS_TARGET'] == 'Android':
+ LOCAL_INCLUDES += [
+ '/media/mtransport/third_party/nrappkit/src/port/android/include',
+ ]
+
+if CONFIG['OS_TARGET'] == 'Linux':
+ LOCAL_INCLUDES += [
+ '/media/mtransport/third_party/nrappkit/src/port/linux/include',
+ ]
+
+if CONFIG['OS_TARGET'] == 'Darwin':
+ LOCAL_INCLUDES += [
+ '/media/mtransport/third_party/nrappkit/src/port/darwin/include',
+ ]
+ OS_LIBS += [
+ '-framework AudioToolbox',
+ '-framework AudioUnit',
+ '-framework Carbon',
+ '-framework CoreAudio',
+ '-framework OpenGL',
+ '-framework AVFoundation',
+ '-framework CoreMedia',
+ '-framework QuartzCore',
+ '-framework Security',
+ '-framework SystemConfiguration',
+ '-framework IOKit',
+ '-F%s' % CONFIG['MACOS_PRIVATE_FRAMEWORKS_DIR'],
+ '-framework CoreUI',
+ ]
+
+if CONFIG['OS_TARGET'] in ('DragonFly', 'FreeBSD', 'NetBSD', 'OpenBSD'):
+ LOCAL_INCLUDES += [
+ '/media/mtransport/third_party/nrappkit/src/port/darwin/include',
+ ]
+
+USE_LIBS += [
+ '/media/webrtc/trunk/testing/gtest_gtest/gtest',
+ 'chromium_atomics',
+ 'gkmedias',
+ 'nksrtp_s',
+ 'nss',
+ 'webrtc',
+ 'yuv',
+ 'zlib',
+]
+
+if CONFIG['JS_SHARED_LIBRARY']:
+ USE_LIBS += [
+ 'js',
+ ]
+
+USE_LIBS += ['mozglue']
+
+OS_LIBS += CONFIG['MOZ_WEBRTC_X11_LIBS']
+OS_LIBS += CONFIG['REALTIME_LIBS']
+
+if CONFIG['MOZ_ALSA']:
+ OS_LIBS += CONFIG['MOZ_ALSA_LIBS']
+
+if CONFIG['MOZ_SYSTEM_JPEG']:
+ OS_LIBS += CONFIG['MOZ_JPEG_LIBS']
+
+if CONFIG['MOZ_SYSTEM_LIBVPX']:
+ OS_LIBS += CONFIG['MOZ_LIBVPX_LIBS']
+
+if not CONFIG['MOZ_TREE_PIXMAN']:
+ OS_LIBS += CONFIG['MOZ_PIXMAN_LIBS']
+
+if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'gtk2':
+ OS_LIBS += CONFIG['XLIBS']
+ OS_LIBS += CONFIG['MOZ_GTK2_LIBS']
+ OS_LIBS += [
+ 'gmodule-2.0',
+ 'gthread-2.0',
+ ]
+
+if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'gtk3':
+ OS_LIBS += CONFIG['XLIBS']
+ OS_LIBS += CONFIG['MOZ_GTK3_LIBS']
+ USE_LIBS += [
+ 'freetype',
+ ]
+
+if CONFIG['OS_TARGET'] in ('Linux', 'DragonFly', 'FreeBSD', 'NetBSD',
+ 'OpenBSD'):
+ OS_LIBS += CONFIG['MOZ_CAIRO_OSLIBS']
+
+if CONFIG['OS_TARGET'] == 'Darwin':
+ OS_LIBS += CONFIG['TK_LIBS']
diff --git a/media/webrtc/signaling/test/jsep_session_unittest.cpp b/media/webrtc/signaling/test/jsep_session_unittest.cpp
new file mode 100644
index 000000000..d29400771
--- /dev/null
+++ b/media/webrtc/signaling/test/jsep_session_unittest.cpp
@@ -0,0 +1,4235 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <iostream>
+#include <map>
+
+#include "nspr.h"
+#include "nss.h"
+#include "ssl.h"
+
+#include "mozilla/RefPtr.h"
+#include "mozilla/Tuple.h"
+
+#define GTEST_HAS_RTTI 0
+#include "gtest/gtest.h"
+#include "gtest_utils.h"
+
+#include "FakeMediaStreams.h"
+#include "FakeMediaStreamsImpl.h"
+#include "FakeLogging.h"
+
+#include "signaling/src/sdp/SdpMediaSection.h"
+#include "signaling/src/sdp/SipccSdpParser.h"
+#include "signaling/src/jsep/JsepCodecDescription.h"
+#include "signaling/src/jsep/JsepTrack.h"
+#include "signaling/src/jsep/JsepSession.h"
+#include "signaling/src/jsep/JsepSessionImpl.h"
+#include "signaling/src/jsep/JsepTrack.h"
+
+#include "mtransport_test_utils.h"
+
+#include "FakeIPC.h"
+#include "FakeIPC.cpp"
+
+#include "TestHarness.h"
+
+namespace mozilla {
+static std::string kAEqualsCandidate("a=candidate:");
+const static size_t kNumCandidatesPerComponent = 3;
+
+class JsepSessionTestBase : public ::testing::Test
+{
+};
+
+// Deterministic stand-in for mozilla::JsepUuidGenerator: emits
+// "FAKE_UUID_<n>" with a monotonically increasing counter.  |ctr| is
+// static, so ids are unique across every generator instance in the test
+// process (important because AddTracks creates throwaway generators).
+class FakeUuidGenerator : public mozilla::JsepUuidGenerator
+{
+public:
+  bool
+  Generate(std::string* str)
+  {
+    std::ostringstream os;
+    os << "FAKE_UUID_" << ++ctr;
+    *str = os.str();
+
+    return true;  // never fails, unlike a real entropy-backed generator
+  }
+
+private:
+  static uint64_t ctr;
+};
+
+uint64_t FakeUuidGenerator::ctr = 1000;
+
+class JsepSessionTest : public JsepSessionTestBase,
+ public ::testing::WithParamInterface<std::string>
+{
+public:
+ JsepSessionTest()
+ : mSessionOff("Offerer", MakeUnique<FakeUuidGenerator>()),
+ mSessionAns("Answerer", MakeUnique<FakeUuidGenerator>())
+ {
+ EXPECT_EQ(NS_OK, mSessionOff.Init());
+ EXPECT_EQ(NS_OK, mSessionAns.Init());
+
+ AddTransportData(&mSessionOff, &mOffererTransport);
+ AddTransportData(&mSessionAns, &mAnswererTransport);
+ }
+
+protected:
+ struct TransportData {
+ std::string mIceUfrag;
+ std::string mIcePwd;
+ std::map<std::string, std::vector<uint8_t> > mFingerprints;
+ };
+
+  // Fabricates a deterministic fake DTLS fingerprint for |session| and
+  // records it in |tdata| so ValidateTransport can later compare the SDP
+  // against it.  Length matches the hash (20 bytes for sha-1, 32
+  // otherwise); fill byte is 0x4f ('O') for the offerer, 0x41 ('A') for
+  // the answerer, so the two sides' fingerprints are distinguishable.
+  void
+  AddDtlsFingerprint(const std::string& alg, JsepSessionImpl* session,
+                     TransportData* tdata)
+  {
+    std::vector<uint8_t> fp;
+    fp.assign((alg == "sha-1") ? 20 : 32,
+              (session->GetName() == "Offerer") ? 0x4f : 0x41);
+    session->AddDtlsFingerprint(alg, fp);
+    tdata->mFingerprints[alg] = fp;
+  }
+
+  // Seeds |session| with ICE credentials derived from its name plus
+  // sha-1/sha-256 fake fingerprints, mirroring everything into |tdata|
+  // so the generated SDP can be checked against the expected values.
+  void
+  AddTransportData(JsepSessionImpl* session, TransportData* tdata)
+  {
+    // Values here semi-borrowed from JSEP draft.
+    tdata->mIceUfrag = session->GetName() + "-ufrag";
+    tdata->mIcePwd = session->GetName() + "-1234567890";
+    session->SetIceCredentials(tdata->mIceUfrag, tdata->mIcePwd);
+    AddDtlsFingerprint("sha-1", session, tdata);
+    AddDtlsFingerprint("sha-256", session, tdata);
+  }
+
+  // Drives mSessionOff.CreateOffer() (with |options| when supplied,
+  // otherwise defaults), expects success, dumps the offer to stderr for
+  // debugging, and validates that the offerer's ICE credentials and
+  // fingerprints appear in every live m-section.  Returns the offer SDP.
+  std::string
+  CreateOffer(const Maybe<JsepOfferOptions> options = Nothing())
+  {
+    JsepOfferOptions defaultOptions;
+    const JsepOfferOptions& optionsRef = options ? *options : defaultOptions;
+    std::string offer;
+    nsresult rv = mSessionOff.CreateOffer(optionsRef, &offer);
+    EXPECT_EQ(NS_OK, rv) << mSessionOff.GetLastError();
+
+    std::cerr << "OFFER: " << offer << std::endl;
+
+    ValidateTransport(mOffererTransport, offer);
+
+    return offer;
+  }
+
+ void
+ AddTracks(JsepSessionImpl& side)
+ {
+ // Add tracks.
+ if (types.empty()) {
+ types = BuildTypes(GetParam());
+ }
+ AddTracks(side, types);
+
+ // Now that we have added streams, we expect audio, then video, then
+ // application in the SDP, regardless of the order in which the streams were
+ // added.
+ std::sort(types.begin(), types.end());
+ }
+
+ void
+ AddTracks(JsepSessionImpl& side, const std::string& mediatypes)
+ {
+ AddTracks(side, BuildTypes(mediatypes));
+ }
+
+  // Parses a comma-separated spec like "audio,video,datachannel" (the
+  // test-parameter strings) into SdpMediaSection media types, preserving
+  // order.  Any unrecognized token is a test-harness programming error
+  // and aborts via MOZ_CRASH.
+  std::vector<SdpMediaSection::MediaType>
+  BuildTypes(const std::string& mediatypes)
+  {
+    std::vector<SdpMediaSection::MediaType> result;
+    size_t ptr = 0;
+
+    for (;;) {
+      // |comma| may be npos; substr then takes the rest of the string.
+      size_t comma = mediatypes.find(',', ptr);
+      std::string chunk = mediatypes.substr(ptr, comma - ptr);
+
+      SdpMediaSection::MediaType type;
+      if (chunk == "audio") {
+        type = SdpMediaSection::kAudio;
+      } else if (chunk == "video") {
+        type = SdpMediaSection::kVideo;
+      } else if (chunk == "datachannel") {
+        type = SdpMediaSection::kApplication;
+      } else {
+        MOZ_CRASH();
+      }
+      result.push_back(type);
+
+      if (comma == std::string::npos)
+        break;
+      ptr = comma + 1;
+    }
+
+    return result;
+  }
+
+ void
+ AddTracks(JsepSessionImpl& side,
+ const std::vector<SdpMediaSection::MediaType>& mediatypes)
+ {
+ FakeUuidGenerator uuid_gen;
+ std::string stream_id;
+ std::string track_id;
+
+ ASSERT_TRUE(uuid_gen.Generate(&stream_id));
+
+ AddTracksToStream(side, stream_id, mediatypes);
+ }
+
+ void
+ AddTracksToStream(JsepSessionImpl& side,
+ const std::string stream_id,
+ const std::string& mediatypes)
+ {
+ AddTracksToStream(side, stream_id, BuildTypes(mediatypes));
+ }
+
+ void
+ AddTracksToStream(JsepSessionImpl& side,
+ const std::string stream_id,
+ const std::vector<SdpMediaSection::MediaType>& mediatypes)
+
+ {
+ FakeUuidGenerator uuid_gen;
+ std::string track_id;
+
+ for (auto track = mediatypes.begin(); track != mediatypes.end(); ++track) {
+ ASSERT_TRUE(uuid_gen.Generate(&track_id));
+
+ RefPtr<JsepTrack> mst(new JsepTrack(*track, stream_id, track_id));
+ side.AddTrack(mst);
+ }
+ }
+
+  // True when any track in |tracks| is a real media (audio/video) track,
+  // i.e. anything other than a datachannel (datachannels have no
+  // MediaStream/msid).
+  // NOTE(review): returns literal 1/0 for a bool -- behaviorally fine,
+  // but true/false would be idiomatic.  Also takes the vector by value
+  // (copies every RefPtr); const& would avoid the refcount churn.
+  bool HasMediaStream(std::vector<RefPtr<JsepTrack>> tracks) const {
+    for (auto i = tracks.begin(); i != tracks.end(); ++i) {
+      if ((*i)->GetMediaType() != SdpMediaSection::kApplication) {
+        return 1;
+      }
+    }
+    return 0;
+  }
+
+ const std::string GetFirstLocalStreamId(JsepSessionImpl& side) const {
+ auto tracks = side.GetLocalTracks();
+ return (*tracks.begin())->GetStreamId();
+ }
+
+ std::vector<std::string>
+ GetMediaStreamIds(std::vector<RefPtr<JsepTrack>> tracks) const {
+ std::vector<std::string> ids;
+ for (auto i = tracks.begin(); i != tracks.end(); ++i) {
+ // data channels don't have msid's
+ if ((*i)->GetMediaType() == SdpMediaSection::kApplication) {
+ continue;
+ }
+ ids.push_back((*i)->GetStreamId());
+ }
+ return ids;
+ }
+
+ std::vector<std::string>
+ GetLocalMediaStreamIds(JsepSessionImpl& side) const {
+ return GetMediaStreamIds(side.GetLocalTracks());
+ }
+
+ std::vector<std::string>
+ GetRemoteMediaStreamIds(JsepSessionImpl& side) const {
+ return GetMediaStreamIds(side.GetRemoteTracks());
+ }
+
+ std::vector<std::string>
+ sortUniqueStrVector(std::vector<std::string> in) const {
+ std::sort(in.begin(), in.end());
+ auto it = std::unique(in.begin(), in.end());
+ in.resize( std::distance(in.begin(), it));
+ return in;
+ }
+
+ std::vector<std::string>
+ GetLocalUniqueStreamIds(JsepSessionImpl& side) const {
+ return sortUniqueStrVector(GetLocalMediaStreamIds(side));
+ }
+
+ std::vector<std::string>
+ GetRemoteUniqueStreamIds(JsepSessionImpl& side) const {
+ return sortUniqueStrVector(GetRemoteMediaStreamIds(side));
+ }
+
+ RefPtr<JsepTrack> GetTrack(JsepSessionImpl& side,
+ SdpMediaSection::MediaType type,
+ size_t index) const {
+ auto tracks = side.GetLocalTracks();
+
+ for (auto i = tracks.begin(); i != tracks.end(); ++i) {
+ if ((*i)->GetMediaType() != type) {
+ continue;
+ }
+
+ if (index != 0) {
+ --index;
+ continue;
+ }
+
+ return *i;
+ }
+
+ return RefPtr<JsepTrack>(nullptr);
+ }
+
+ RefPtr<JsepTrack> GetTrackOff(size_t index,
+ SdpMediaSection::MediaType type) {
+ return GetTrack(mSessionOff, type, index);
+ }
+
+ RefPtr<JsepTrack> GetTrackAns(size_t index,
+ SdpMediaSection::MediaType type) {
+ return GetTrack(mSessionAns, type, index);
+ }
+
+ class ComparePairsByLevel {
+ public:
+ bool operator()(const JsepTrackPair& lhs,
+ const JsepTrackPair& rhs) const {
+ return lhs.mLevel < rhs.mLevel;
+ }
+ };
+
+ std::vector<JsepTrackPair> GetTrackPairsByLevel(JsepSessionImpl& side) const {
+ auto pairs = side.GetNegotiatedTrackPairs();
+ std::sort(pairs.begin(), pairs.end(), ComparePairsByLevel());
+ return pairs;
+ }
+
+ bool Equals(const SdpFingerprintAttributeList::Fingerprint& f1,
+ const SdpFingerprintAttributeList::Fingerprint& f2) const {
+ if (f1.hashFunc != f2.hashFunc) {
+ return false;
+ }
+
+ if (f1.fingerprint != f2.fingerprint) {
+ return false;
+ }
+
+ return true;
+ }
+
+ bool Equals(const SdpFingerprintAttributeList& f1,
+ const SdpFingerprintAttributeList& f2) const {
+ if (f1.mFingerprints.size() != f2.mFingerprints.size()) {
+ return false;
+ }
+
+ for (size_t i=0; i<f1.mFingerprints.size(); ++i) {
+ if (!Equals(f1.mFingerprints[i], f2.mFingerprints[i])) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ bool Equals(const UniquePtr<JsepDtlsTransport>& t1,
+ const UniquePtr<JsepDtlsTransport>& t2) const {
+ if (!t1 && !t2) {
+ return true;
+ }
+
+ if (!t1 || !t2) {
+ return false;
+ }
+
+ if (!Equals(t1->GetFingerprints(), t2->GetFingerprints())) {
+ return false;
+ }
+
+ if (t1->GetRole() != t2->GetRole()) {
+ return false;
+ }
+
+ return true;
+ }
+
+
+ bool Equals(const UniquePtr<JsepIceTransport>& t1,
+ const UniquePtr<JsepIceTransport>& t2) const {
+ if (!t1 && !t2) {
+ return true;
+ }
+
+ if (!t1 || !t2) {
+ return false;
+ }
+
+ if (t1->GetUfrag() != t2->GetUfrag()) {
+ return false;
+ }
+
+ if (t1->GetPassword() != t2->GetPassword()) {
+ return false;
+ }
+
+ return true;
+ }
+
+ bool Equals(const RefPtr<JsepTransport>& t1,
+ const RefPtr<JsepTransport>& t2) const {
+ if (!t1 && !t2) {
+ return true;
+ }
+
+ if (!t1 || !t2) {
+ return false;
+ }
+
+ if (t1->mTransportId != t2->mTransportId) {
+ return false;
+ }
+
+ if (t1->mComponents != t2->mComponents) {
+ return false;
+ }
+
+ if (!Equals(t1->mIce, t2->mIce)) {
+ return false;
+ }
+
+ return true;
+ }
+
+ bool Equals(const JsepTrackPair& p1,
+ const JsepTrackPair& p2) const {
+ if (p1.mLevel != p2.mLevel) {
+ return false;
+ }
+
+ // We don't check things like mBundleLevel, since that can change without
+ // any changes to the transport, which is what we're really interested in.
+
+ if (p1.mSending.get() != p2.mSending.get()) {
+ return false;
+ }
+
+ if (p1.mReceiving.get() != p2.mReceiving.get()) {
+ return false;
+ }
+
+ if (!Equals(p1.mRtpTransport, p2.mRtpTransport)) {
+ return false;
+ }
+
+ if (!Equals(p1.mRtcpTransport, p2.mRtcpTransport)) {
+ return false;
+ }
+
+ return true;
+ }
+
+ size_t GetTrackCount(JsepSessionImpl& side,
+ SdpMediaSection::MediaType type) const {
+ auto tracks = side.GetLocalTracks();
+ size_t result = 0;
+ for (auto i = tracks.begin(); i != tracks.end(); ++i) {
+ if ((*i)->GetMediaType() == type) {
+ ++result;
+ }
+ }
+ return result;
+ }
+
+ UniquePtr<Sdp> GetParsedLocalDescription(const JsepSessionImpl& side) const {
+ return Parse(side.GetLocalDescription());
+ }
+
+ SdpMediaSection* GetMsection(Sdp& sdp,
+ SdpMediaSection::MediaType type,
+ size_t index) const {
+ for (size_t i = 0; i < sdp.GetMediaSectionCount(); ++i) {
+ auto& msection = sdp.GetMediaSection(i);
+ if (msection.GetMediaType() != type) {
+ continue;
+ }
+
+ if (index) {
+ --index;
+ continue;
+ }
+
+ return &msection;
+ }
+
+ return nullptr;
+ }
+
+ void
+ SetPayloadTypeNumber(JsepSession& session,
+ const std::string& codecName,
+ const std::string& payloadType)
+ {
+ for (auto* codec : session.Codecs()) {
+ if (codec->mName == codecName) {
+ codec->mDefaultPt = payloadType;
+ }
+ }
+ }
+
+ void
+ SetCodecEnabled(JsepSession& session,
+ const std::string& codecName,
+ bool enabled)
+ {
+ for (auto* codec : session.Codecs()) {
+ if (codec->mName == codecName) {
+ codec->mEnabled = enabled;
+ }
+ }
+ }
+
+ void
+ EnsureNegotiationFailure(SdpMediaSection::MediaType type,
+ const std::string& codecName)
+ {
+ for (auto i = mSessionOff.Codecs().begin(); i != mSessionOff.Codecs().end();
+ ++i) {
+ auto* codec = *i;
+ if (codec->mType == type && codec->mName != codecName) {
+ codec->mEnabled = false;
+ }
+ }
+
+ for (auto i = mSessionAns.Codecs().begin(); i != mSessionAns.Codecs().end();
+ ++i) {
+ auto* codec = *i;
+ if (codec->mType == type && codec->mName == codecName) {
+ codec->mEnabled = false;
+ }
+ }
+ }
+
+  // Drives mSessionAns.CreateAnswer() with default options, expects
+  // success, dumps the answer to stderr, and validates the answerer's
+  // ICE credentials/fingerprints in the SDP.  Returns the answer SDP.
+  std::string
+  CreateAnswer()
+  {
+    JsepAnswerOptions options;
+    std::string answer;
+    nsresult rv = mSessionAns.CreateAnswer(options, &answer);
+    EXPECT_EQ(NS_OK, rv);
+
+    std::cerr << "ANSWER: " << answer << std::endl;
+
+    ValidateTransport(mAnswererTransport, answer);
+
+    return answer;
+  }
+
+ static const uint32_t NO_CHECKS = 0;
+ static const uint32_t CHECK_SUCCESS = 1;
+ static const uint32_t CHECK_TRACKS = 1 << 2;
+ static const uint32_t ALL_CHECKS = CHECK_SUCCESS | CHECK_TRACKS;
+
+ void OfferAnswer(uint32_t checkFlags = ALL_CHECKS,
+ const Maybe<JsepOfferOptions> options = Nothing()) {
+ std::string offer = CreateOffer(options);
+ SetLocalOffer(offer, checkFlags);
+ SetRemoteOffer(offer, checkFlags);
+
+ std::string answer = CreateAnswer();
+ SetLocalAnswer(answer, checkFlags);
+ SetRemoteAnswer(answer, checkFlags);
+ }
+
+ void
+ SetLocalOffer(const std::string& offer, uint32_t checkFlags = ALL_CHECKS)
+ {
+ nsresult rv = mSessionOff.SetLocalDescription(kJsepSdpOffer, offer);
+
+ if (checkFlags & CHECK_SUCCESS) {
+ ASSERT_EQ(NS_OK, rv);
+ }
+
+ if (checkFlags & CHECK_TRACKS) {
+ // Check that the transports exist.
+ ASSERT_EQ(types.size(), mSessionOff.GetTransports().size());
+ auto tracks = mSessionOff.GetLocalTracks();
+ for (size_t i = 0; i < types.size(); ++i) {
+ ASSERT_NE("", tracks[i]->GetStreamId());
+ ASSERT_NE("", tracks[i]->GetTrackId());
+ if (tracks[i]->GetMediaType() != SdpMediaSection::kApplication) {
+ std::string msidAttr("a=msid:");
+ msidAttr += tracks[i]->GetStreamId();
+ msidAttr += " ";
+ msidAttr += tracks[i]->GetTrackId();
+ ASSERT_NE(std::string::npos, offer.find(msidAttr))
+ << "Did not find " << msidAttr << " in offer";
+ }
+ }
+ }
+ }
+
+ void
+ SetRemoteOffer(const std::string& offer, uint32_t checkFlags = ALL_CHECKS)
+ {
+ nsresult rv = mSessionAns.SetRemoteDescription(kJsepSdpOffer, offer);
+
+ if (checkFlags & CHECK_SUCCESS) {
+ ASSERT_EQ(NS_OK, rv);
+ }
+
+ if (checkFlags & CHECK_TRACKS) {
+ auto tracks = mSessionAns.GetRemoteTracks();
+ // Now verify that the right stuff is in the tracks.
+ ASSERT_EQ(types.size(), tracks.size());
+ for (size_t i = 0; i < tracks.size(); ++i) {
+ ASSERT_EQ(types[i], tracks[i]->GetMediaType());
+ ASSERT_NE("", tracks[i]->GetStreamId());
+ ASSERT_NE("", tracks[i]->GetTrackId());
+ if (tracks[i]->GetMediaType() != SdpMediaSection::kApplication) {
+ std::string msidAttr("a=msid:");
+ msidAttr += tracks[i]->GetStreamId();
+ msidAttr += " ";
+ msidAttr += tracks[i]->GetTrackId();
+ ASSERT_NE(std::string::npos, offer.find(msidAttr))
+ << "Did not find " << msidAttr << " in offer";
+ }
+ }
+ }
+ }
+
+  // Applies |answer| as the answerer's local description.  With
+  // CHECK_SUCCESS, asserts the call succeeded; with CHECK_TRACKS,
+  // asserts one negotiated pair per expected media type, that both
+  // directions carry stream/track ids, and that each sending track's
+  // msid appears in the answer SDP.
+  void
+  SetLocalAnswer(const std::string& answer, uint32_t checkFlags = ALL_CHECKS)
+  {
+    nsresult rv = mSessionAns.SetLocalDescription(kJsepSdpAnswer, answer);
+    if (checkFlags & CHECK_SUCCESS) {
+      ASSERT_EQ(NS_OK, rv);
+    }
+
+    if (checkFlags & CHECK_TRACKS) {
+      // Verify that the right stuff is in the tracks.
+      auto pairs = mSessionAns.GetNegotiatedTrackPairs();
+      ASSERT_EQ(types.size(), pairs.size());
+      for (size_t i = 0; i < types.size(); ++i) {
+        ASSERT_TRUE(pairs[i].mSending);
+        ASSERT_EQ(types[i], pairs[i].mSending->GetMediaType());
+        ASSERT_TRUE(pairs[i].mReceiving);
+        ASSERT_EQ(types[i], pairs[i].mReceiving->GetMediaType());
+        ASSERT_NE("", pairs[i].mSending->GetStreamId());
+        ASSERT_NE("", pairs[i].mSending->GetTrackId());
+        // These might have been in the SDP, or might have been randomly
+        // chosen by JsepSessionImpl
+        ASSERT_NE("", pairs[i].mReceiving->GetStreamId());
+        ASSERT_NE("", pairs[i].mReceiving->GetTrackId());
+
+        if (pairs[i].mReceiving->GetMediaType() != SdpMediaSection::kApplication) {
+          std::string msidAttr("a=msid:");
+          msidAttr += pairs[i].mSending->GetStreamId();
+          msidAttr += " ";
+          msidAttr += pairs[i].mSending->GetTrackId();
+          // NOTE(review): the failure message says "in offer" but the
+          // string searched is |answer| -- message is misleading (compare
+          // SetRemoteAnswer, which correctly says "in answer").
+          ASSERT_NE(std::string::npos, answer.find(msidAttr))
+            << "Did not find " << msidAttr << " in offer";
+        }
+      }
+    }
+    // NOTE(review): labeled "OFFER pairs" but DumpTrackPairs ignores its
+    // argument and always dumps mSessionAns -- see DumpTrackPairs.
+    std::cerr << "OFFER pairs:" << std::endl;
+    DumpTrackPairs(mSessionOff);
+  }
+
+ void
+ SetRemoteAnswer(const std::string& answer, uint32_t checkFlags = ALL_CHECKS)
+ {
+ nsresult rv = mSessionOff.SetRemoteDescription(kJsepSdpAnswer, answer);
+ if (checkFlags & CHECK_SUCCESS) {
+ ASSERT_EQ(NS_OK, rv);
+ }
+
+ if (checkFlags & CHECK_TRACKS) {
+ // Verify that the right stuff is in the tracks.
+ auto pairs = mSessionOff.GetNegotiatedTrackPairs();
+ ASSERT_EQ(types.size(), pairs.size());
+ for (size_t i = 0; i < types.size(); ++i) {
+ ASSERT_TRUE(pairs[i].mSending);
+ ASSERT_EQ(types[i], pairs[i].mSending->GetMediaType());
+ ASSERT_TRUE(pairs[i].mReceiving);
+ ASSERT_EQ(types[i], pairs[i].mReceiving->GetMediaType());
+ ASSERT_NE("", pairs[i].mSending->GetStreamId());
+ ASSERT_NE("", pairs[i].mSending->GetTrackId());
+ // These might have been in the SDP, or might have been randomly
+ // chosen by JsepSessionImpl
+ ASSERT_NE("", pairs[i].mReceiving->GetStreamId());
+ ASSERT_NE("", pairs[i].mReceiving->GetTrackId());
+
+ if (pairs[i].mReceiving->GetMediaType() != SdpMediaSection::kApplication) {
+ std::string msidAttr("a=msid:");
+ msidAttr += pairs[i].mReceiving->GetStreamId();
+ msidAttr += " ";
+ msidAttr += pairs[i].mReceiving->GetTrackId();
+ ASSERT_NE(std::string::npos, answer.find(msidAttr))
+ << "Did not find " << msidAttr << " in answer";
+ }
+ }
+ }
+ std::cerr << "ANSWER pairs:" << std::endl;
+ DumpTrackPairs(mSessionAns);
+ }
+
+ typedef enum {
+ RTP = 1,
+ RTCP = 2
+ } ComponentType;
+
+ class CandidateSet {
+ public:
+ CandidateSet() {}
+
+ void Gather(JsepSession& session,
+ const std::vector<SdpMediaSection::MediaType>& types,
+ ComponentType maxComponent = RTCP)
+ {
+ for (size_t level = 0; level < types.size(); ++level) {
+ Gather(session, level, RTP);
+ if (types[level] != SdpMediaSection::kApplication &&
+ maxComponent == RTCP) {
+ Gather(session, level, RTCP);
+ }
+ }
+ FinishGathering(session);
+ }
+
+    // Generates kNumCandidatesPerComponent synthetic host candidates for
+    // one (level, component), feeds them to the session, and records
+    // both the raw candidates and the last port as the default
+    // candidate for that component.
+    // NOTE(review): |port| is function-local static, so port numbers
+    // keep incrementing across every CandidateSet and every test in the
+    // process -- deterministic per run, but not per test.
+    void Gather(JsepSession& session, size_t level, ComponentType component)
+    {
+      static uint16_t port = 1000;
+      std::vector<std::string> candidates;
+      for (size_t i = 0; i < kNumCandidatesPerComponent; ++i) {
+        ++port;
+        std::ostringstream candidate;
+        candidate << "0 " << static_cast<uint16_t>(component)
+                  << " UDP 9999 192.168.0.1 " << port << " typ host";
+        std::string mid;
+        bool skipped;
+        session.AddLocalIceCandidate(kAEqualsCandidate + candidate.str(),
+                                     level, &mid, &skipped);
+        // Skipped candidates (e.g. bundled/disabled levels) still need to
+        // be remembered for trickling? No -- only accepted ones are kept.
+        if (!skipped) {
+          mCandidatesToTrickle.push_back(
+              Tuple<Level, Mid, Candidate>(
+                  level, mid, kAEqualsCandidate + candidate.str()));
+          candidates.push_back(candidate.str());
+        }
+      }
+
+      // Stomp existing candidates
+      mCandidates[level][component] = candidates;
+
+      // Stomp existing defaults
+      mDefaultCandidates[level][component] =
+          std::make_pair("192.168.0.1", port);
+      session.UpdateDefaultCandidate(
+          mDefaultCandidates[level][RTP].first,
+          mDefaultCandidates[level][RTP].second,
+          // Will be empty string if not present, which is how we indicate
+          // that there is no default for RTCP
+          mDefaultCandidates[level][RTCP].first,
+          mDefaultCandidates[level][RTCP].second,
+          level);
+    }
+
+ void FinishGathering(JsepSession& session) const
+ {
+ // Copy so we can be terse and use []
+ for (auto levelAndCandidates : mDefaultCandidates) {
+ ASSERT_EQ(1U, levelAndCandidates.second.count(RTP));
+ // do a final UpdateDefaultCandidate here in case candidates were
+ // cleared during renegotiation.
+ session.UpdateDefaultCandidate(
+ levelAndCandidates.second[RTP].first,
+ levelAndCandidates.second[RTP].second,
+ // Will be empty string if not present, which is how we indicate
+ // that there is no default for RTCP
+ levelAndCandidates.second[RTCP].first,
+ levelAndCandidates.second[RTCP].second,
+ levelAndCandidates.first);
+ session.EndOfLocalCandidates(levelAndCandidates.first);
+ }
+ }
+
+ void Trickle(JsepSession& session)
+ {
+ for (const auto& levelMidAndCandidate : mCandidatesToTrickle) {
+ Level level;
+ Mid mid;
+ Candidate candidate;
+ Tie(level, mid, candidate) = levelMidAndCandidate;
+ session.AddRemoteIceCandidate(candidate, mid, level);
+ }
+ mCandidatesToTrickle.clear();
+ }
+
+ void CheckRtpCandidates(bool expectRtpCandidates,
+ const SdpMediaSection& msection,
+ size_t transportLevel,
+ const std::string& context) const
+ {
+ auto& attrs = msection.GetAttributeList();
+
+ ASSERT_EQ(expectRtpCandidates,
+ attrs.HasAttribute(SdpAttribute::kCandidateAttribute))
+ << context << " (level " << msection.GetLevel() << ")";
+
+ if (expectRtpCandidates) {
+ // Copy so we can be terse and use []
+ auto expectedCandidates = mCandidates;
+ ASSERT_LE(kNumCandidatesPerComponent,
+ expectedCandidates[transportLevel][RTP].size());
+
+ auto& candidates = attrs.GetCandidate();
+ ASSERT_LE(kNumCandidatesPerComponent, candidates.size())
+ << context << " (level " << msection.GetLevel() << ")";
+ for (size_t i = 0; i < kNumCandidatesPerComponent; ++i) {
+ ASSERT_EQ(expectedCandidates[transportLevel][RTP][i], candidates[i])
+ << context << " (level " << msection.GetLevel() << ")";
+ }
+ }
+ }
+
+ void CheckRtcpCandidates(bool expectRtcpCandidates,
+ const SdpMediaSection& msection,
+ size_t transportLevel,
+ const std::string& context) const
+ {
+ auto& attrs = msection.GetAttributeList();
+
+ if (expectRtcpCandidates) {
+ // Copy so we can be terse and use []
+ auto expectedCandidates = mCandidates;
+ ASSERT_LE(kNumCandidatesPerComponent,
+ expectedCandidates[transportLevel][RTCP].size());
+
+ ASSERT_TRUE(attrs.HasAttribute(SdpAttribute::kCandidateAttribute))
+ << context << " (level " << msection.GetLevel() << ")";
+ auto& candidates = attrs.GetCandidate();
+ ASSERT_EQ(kNumCandidatesPerComponent * 2, candidates.size())
+ << context << " (level " << msection.GetLevel() << ")";
+ for (size_t i = 0; i < kNumCandidatesPerComponent; ++i) {
+ ASSERT_EQ(expectedCandidates[transportLevel][RTCP][i],
+ candidates[i + kNumCandidatesPerComponent])
+ << context << " (level " << msection.GetLevel() << ")";
+ }
+ }
+ }
+
+ void CheckDefaultRtpCandidate(bool expectDefault,
+ const SdpMediaSection& msection,
+ size_t transportLevel,
+ const std::string& context) const
+ {
+ if (expectDefault) {
+ // Copy so we can be terse and use []
+ auto defaultCandidates = mDefaultCandidates;
+ ASSERT_EQ(defaultCandidates[transportLevel][RTP].first,
+ msection.GetConnection().GetAddress())
+ << context << " (level " << msection.GetLevel() << ")";
+ ASSERT_EQ(defaultCandidates[transportLevel][RTP].second,
+ msection.GetPort())
+ << context << " (level " << msection.GetLevel() << ")";
+ } else {
+ ASSERT_EQ("0.0.0.0", msection.GetConnection().GetAddress())
+ << context << " (level " << msection.GetLevel() << ")";
+ ASSERT_EQ(9U, msection.GetPort())
+ << context << " (level " << msection.GetLevel() << ")";
+ }
+ }
+
+ void CheckDefaultRtcpCandidate(bool expectDefault,
+ const SdpMediaSection& msection,
+ size_t transportLevel,
+ const std::string& context) const
+ {
+ if (expectDefault) {
+ // Copy so we can be terse and use []
+ auto defaultCandidates = mDefaultCandidates;
+ ASSERT_TRUE(msection.GetAttributeList().HasAttribute(
+ SdpAttribute::kRtcpAttribute))
+ << context << " (level " << msection.GetLevel() << ")";
+ auto& rtcpAttr = msection.GetAttributeList().GetRtcp();
+ ASSERT_EQ(defaultCandidates[transportLevel][RTCP].second,
+ rtcpAttr.mPort)
+ << context << " (level " << msection.GetLevel() << ")";
+ ASSERT_EQ(sdp::kInternet, rtcpAttr.mNetType)
+ << context << " (level " << msection.GetLevel() << ")";
+ ASSERT_EQ(sdp::kIPv4, rtcpAttr.mAddrType)
+ << context << " (level " << msection.GetLevel() << ")";
+ ASSERT_EQ(defaultCandidates[transportLevel][RTCP].first,
+ rtcpAttr.mAddress)
+ << context << " (level " << msection.GetLevel() << ")";
+ } else {
+ ASSERT_FALSE(msection.GetAttributeList().HasAttribute(
+ SdpAttribute::kRtcpAttribute))
+ << context << " (level " << msection.GetLevel() << ")";
+ }
+ }
+
+ private:
+ typedef size_t Level;
+ typedef std::string Mid;
+ typedef std::string Candidate;
+ typedef std::string Address;
+ typedef uint16_t Port;
+ // Default candidates are put into the m-line, c-line, and rtcp
+ // attribute for endpoints that don't support ICE.
+ std::map<Level,
+ std::map<ComponentType,
+ std::pair<Address, Port>>> mDefaultCandidates;
+ std::map<Level,
+ std::map<ComponentType,
+ std::vector<Candidate>>> mCandidates;
+ // Level/mid/candidate tuples that need to be trickled
+ std::vector<Tuple<Level, Mid, Candidate>> mCandidatesToTrickle;
+ };
+
+ // For streaming parse errors
+ std::string
+ GetParseErrors(const SipccSdpParser& parser) const
+ {
+ std::stringstream output;
+ for (auto e = parser.GetParseErrors().begin();
+ e != parser.GetParseErrors().end();
+ ++e) {
+ output << e->first << ": " << e->second << std::endl;
+ }
+ return output.str();
+ }
+
+ void CheckEndOfCandidates(bool expectEoc,
+ const SdpMediaSection& msection,
+ const std::string& context)
+ {
+ if (expectEoc) {
+ ASSERT_TRUE(msection.GetAttributeList().HasAttribute(
+ SdpAttribute::kEndOfCandidatesAttribute))
+ << context << " (level " << msection.GetLevel() << ")";
+ } else {
+ ASSERT_FALSE(msection.GetAttributeList().HasAttribute(
+ SdpAttribute::kEndOfCandidatesAttribute))
+ << context << " (level " << msection.GetLevel() << ")";
+ }
+ }
+
+ void CheckPairs(const JsepSession& session, const std::string& context)
+ {
+ auto pairs = session.GetNegotiatedTrackPairs();
+
+ for (JsepTrackPair& pair : pairs) {
+ if (types.size() == 1) {
+ ASSERT_FALSE(pair.mBundleLevel.isSome()) << context;
+ } else {
+ ASSERT_TRUE(pair.mBundleLevel.isSome()) << context;
+ ASSERT_EQ(0U, *pair.mBundleLevel) << context;
+ }
+ }
+ }
+
+  // Garbles the "a=msid-semantic" attribute in |sdp| (to "a=Xsid-semantic")
+  // so the remote side behaves as if msid support were absent.
+  void
+  DisableMsid(std::string* sdp) const {
+    size_t pos = sdp->find("a=msid-semantic");
+    ASSERT_NE(std::string::npos, pos);
+    (*sdp)[pos + 2] = 'X'; // garble, a=Xsid-semantic
+  }
+
+  // Garbles the "a=group:BUNDLE" attribute in |sdp| (to "a=group:BUNGLE")
+  // so the remote side behaves as if bundle were not offered.
+  void
+  DisableBundle(std::string* sdp) const {
+    size_t pos = sdp->find("a=group:BUNDLE");
+    ASSERT_NE(std::string::npos, pos);
+    (*sdp)[pos + 11] = 'G'; // garble, a=group:BUNGLE
+  }
+
+ void
+ DisableMsection(std::string* sdp, size_t level) const {
+ UniquePtr<Sdp> parsed(Parse(*sdp));
+ ASSERT_TRUE(parsed.get());
+ ASSERT_LT(level, parsed->GetMediaSectionCount());
+ SdpHelper::DisableMsection(parsed.get(), &parsed->GetMediaSection(level));
+ (*sdp) = parsed->ToString();
+ }
+
+ void
+ ReplaceInSdp(std::string* sdp,
+ const char* searchStr,
+ const char* replaceStr) const
+ {
+ if (searchStr[0] == '\0') return;
+ size_t pos;
+ while ((pos = sdp->find(searchStr)) != std::string::npos) {
+ sdp->replace(pos, strlen(searchStr), replaceStr);
+ }
+ }
+
+ void
+ ValidateDisabledMSection(const SdpMediaSection* msection)
+ {
+ ASSERT_EQ(1U, msection->GetFormats().size());
+ // Maybe validate that no attributes are present except rtpmap and
+ // inactive? How?
+ ASSERT_EQ(SdpDirectionAttribute::kInactive,
+ msection->GetDirectionAttribute().mValue);
+ if (msection->GetMediaType() == SdpMediaSection::kAudio) {
+ ASSERT_EQ("0", msection->GetFormats()[0]);
+ const SdpRtpmapAttributeList::Rtpmap* rtpmap(msection->FindRtpmap("0"));
+ ASSERT_TRUE(rtpmap);
+ ASSERT_EQ("0", rtpmap->pt);
+ ASSERT_EQ("PCMU", rtpmap->name);
+ } else if (msection->GetMediaType() == SdpMediaSection::kVideo) {
+ ASSERT_EQ("120", msection->GetFormats()[0]);
+ const SdpRtpmapAttributeList::Rtpmap* rtpmap(msection->FindRtpmap("120"));
+ ASSERT_TRUE(rtpmap);
+ ASSERT_EQ("120", rtpmap->pt);
+ ASSERT_EQ("VP8", rtpmap->name);
+ } else if (msection->GetMediaType() == SdpMediaSection::kApplication) {
+ ASSERT_EQ("5000", msection->GetFormats()[0]);
+ const SdpSctpmapAttributeList::Sctpmap* sctpmap(msection->FindSctpmap("5000"));
+ ASSERT_TRUE(sctpmap);
+ ASSERT_EQ("5000", sctpmap->pt);
+ ASSERT_EQ("rejected", sctpmap->name);
+ ASSERT_EQ(0U, sctpmap->streams);
+ } else {
+ // Not that we would have any test which tests this...
+ ASSERT_EQ("19", msection->GetFormats()[0]);
+ const SdpRtpmapAttributeList::Rtpmap* rtpmap(msection->FindRtpmap("19"));
+ ASSERT_TRUE(rtpmap);
+ ASSERT_EQ("19", rtpmap->pt);
+ ASSERT_EQ("reserved", rtpmap->name);
+ }
+ }
+
+ void
+ DumpTrack(const JsepTrack& track)
+ {
+ const JsepTrackNegotiatedDetails* details = track.GetNegotiatedDetails();
+ std::cerr << " type=" << track.GetMediaType() << std::endl;
+ std::cerr << " encodings=" << std::endl;
+ for (size_t i = 0; i < details->GetEncodingCount(); ++i) {
+ const JsepTrackEncoding& encoding = details->GetEncoding(i);
+ std::cerr << " id=" << encoding.mRid << std::endl;
+ for (const JsepCodecDescription* codec : encoding.GetCodecs()) {
+ std::cerr << " " << codec->mName
+ << " enabled(" << (codec->mEnabled?"yes":"no") << ")";
+ if (track.GetMediaType() == SdpMediaSection::kAudio) {
+ const JsepAudioCodecDescription* audioCodec =
+ static_cast<const JsepAudioCodecDescription*>(codec);
+ std::cerr << " dtmf(" << (audioCodec->mDtmfEnabled?"yes":"no") << ")";
+ }
+ std::cerr << std::endl;
+ }
+ }
+ }
+
+  // Dumps every negotiated track pair (sending and receiving sides) to
+  // stderr for debugging.
+  // NOTE(review): the |session| parameter is ignored -- the body always
+  // reads mSessionAns, so callers passing mSessionOff (e.g. from
+  // SetRemoteAnswer/SetLocalAnswer) actually dump the answerer's pairs.
+  // Looks like a bug: should be session.GetNegotiatedTrackPairs().
+  void
+  DumpTrackPairs(const JsepSessionImpl& session)
+  {
+    auto pairs = mSessionAns.GetNegotiatedTrackPairs();
+    for (auto i = pairs.begin(); i != pairs.end(); ++i) {
+      std::cerr << "Track pair " << i->mLevel << std::endl;
+      if (i->mSending) {
+        std::cerr << "Sending-->" << std::endl;
+        DumpTrack(*i->mSending);
+      }
+      if (i->mReceiving) {
+        std::cerr << "Receiving-->" << std::endl;
+        DumpTrack(*i->mReceiving);
+      }
+    }
+  }
+
+ UniquePtr<Sdp>
+ Parse(const std::string& sdp) const
+ {
+ SipccSdpParser parser;
+ UniquePtr<Sdp> parsed = parser.Parse(sdp);
+ EXPECT_TRUE(parsed.get()) << "Should have valid SDP" << std::endl
+ << "Errors were: " << GetParseErrors(parser);
+ return parsed;
+ }
+
+ JsepSessionImpl mSessionOff;
+ CandidateSet mOffCandidates;
+ JsepSessionImpl mSessionAns;
+ CandidateSet mAnsCandidates;
+ std::vector<SdpMediaSection::MediaType> types;
+ std::vector<std::pair<std::string, uint16_t>> mGatheredCandidates;
+
+private:
+  // Parses |sdp_str| and checks every m-section's transport parameters
+  // against |source| (the expected ufrag/pwd/fingerprints recorded when
+  // the session was seeded): protocol per media type, disabled sections
+  // (port 0) are well-formed, and the fingerprint set matches exactly.
+  void
+  ValidateTransport(TransportData& source, const std::string& sdp_str)
+  {
+    UniquePtr<Sdp> sdp(Parse(sdp_str));
+    ASSERT_TRUE(!!sdp);
+    size_t num_m_sections = sdp->GetMediaSectionCount();
+    for (size_t i = 0; i < num_m_sections; ++i) {
+      auto& msection = sdp->GetMediaSection(i);
+
+      // Datachannel sections use DTLS/SCTP; audio/video use
+      // UDP/TLS/RTP/SAVPF.
+      if (msection.GetMediaType() == SdpMediaSection::kApplication) {
+        ASSERT_EQ(SdpMediaSection::kDtlsSctp, msection.GetProtocol());
+      } else {
+        ASSERT_EQ(SdpMediaSection::kUdpTlsRtpSavpf, msection.GetProtocol());
+      }
+
+      // Port 0 means the m-section was rejected/disabled; it has its own
+      // well-formedness rules and no transport attributes to check.
+      if (msection.GetPort() == 0) {
+        ValidateDisabledMSection(&msection);
+        continue;
+      }
+      const SdpAttributeList& attrs = msection.GetAttributeList();
+      ASSERT_EQ(source.mIceUfrag, attrs.GetIceUfrag());
+      ASSERT_EQ(source.mIcePwd, attrs.GetIcePwd());
+      const SdpFingerprintAttributeList& fps = attrs.GetFingerprint();
+      for (auto fp = fps.mFingerprints.begin(); fp != fps.mFingerprints.end();
+           ++fp) {
+        std::string alg_str = "None";
+
+        if (fp->hashFunc == SdpFingerprintAttributeList::kSha1) {
+          alg_str = "sha-1";
+        } else if (fp->hashFunc == SdpFingerprintAttributeList::kSha256) {
+          alg_str = "sha-256";
+        }
+
+        // An unknown hashFunc leaves alg_str = "None", which will fail
+        // the map comparison below (map lookup inserts an empty vector).
+        ASSERT_EQ(source.mFingerprints[alg_str], fp->fingerprint);
+      }
+      // Both directions: no missing and no extra fingerprints.
+      ASSERT_EQ(source.mFingerprints.size(), fps.mFingerprints.size());
+    }
+  }
+
+ TransportData mOffererTransport;
+ TransportData mAnswererTransport;
+};
+
+TEST_F(JsepSessionTestBase, CreateDestroy) {}
+
+TEST_P(JsepSessionTest, CreateOffer)
+{
+ AddTracks(mSessionOff);
+ CreateOffer();
+}
+
+TEST_P(JsepSessionTest, CreateOfferSetLocal)
+{
+ AddTracks(mSessionOff);
+ std::string offer = CreateOffer();
+ SetLocalOffer(offer);
+}
+
+TEST_P(JsepSessionTest, CreateOfferSetLocalSetRemote)
+{
+ AddTracks(mSessionOff);
+ std::string offer = CreateOffer();
+ SetLocalOffer(offer);
+ SetRemoteOffer(offer);
+}
+
+TEST_P(JsepSessionTest, CreateOfferSetLocalSetRemoteCreateAnswer)
+{
+ AddTracks(mSessionOff);
+ std::string offer = CreateOffer();
+ SetLocalOffer(offer);
+ SetRemoteOffer(offer);
+ AddTracks(mSessionAns);
+ std::string answer = CreateAnswer();
+}
+
+// Same as above, plus the answerer applies its own answer locally.
+TEST_P(JsepSessionTest, CreateOfferSetLocalSetRemoteCreateAnswerSetLocal)
+{
+ AddTracks(mSessionOff);
+ std::string offer = CreateOffer();
+ SetLocalOffer(offer);
+ SetRemoteOffer(offer);
+ AddTracks(mSessionAns);
+ std::string answer = CreateAnswer();
+ SetLocalAnswer(answer);
+}
+
+// Complete offer/answer exchange in both directions (a full call setup).
+TEST_P(JsepSessionTest, FullCall)
+{
+ AddTracks(mSessionOff);
+ std::string offer = CreateOffer();
+ SetLocalOffer(offer);
+ SetRemoteOffer(offer);
+ AddTracks(mSessionAns);
+ std::string answer = CreateAnswer();
+ SetLocalAnswer(answer);
+ SetRemoteAnswer(answer);
+}
+
+// A renegotiation that changes nothing must not report any remote tracks
+// added or removed, and must leave every negotiated track pair identical
+// to what the initial negotiation produced.
+TEST_P(JsepSessionTest, RenegotiationNoChange)
+{
+ AddTracks(mSessionOff);
+ std::string offer = CreateOffer();
+ SetLocalOffer(offer);
+ SetRemoteOffer(offer);
+
+ // Initial negotiation: answerer sees one added remote track per m-section.
+ auto added = mSessionAns.GetRemoteTracksAdded();
+ auto removed = mSessionAns.GetRemoteTracksRemoved();
+ ASSERT_EQ(types.size(), added.size());
+ ASSERT_EQ(0U, removed.size());
+
+ AddTracks(mSessionAns);
+ std::string answer = CreateAnswer();
+ SetLocalAnswer(answer);
+ SetRemoteAnswer(answer);
+
+ added = mSessionOff.GetRemoteTracksAdded();
+ removed = mSessionOff.GetRemoteTracksRemoved();
+ ASSERT_EQ(types.size(), added.size());
+ ASSERT_EQ(0U, removed.size());
+
+ // Snapshot the negotiated pairs before renegotiating.
+ auto offererPairs = GetTrackPairsByLevel(mSessionOff);
+ auto answererPairs = GetTrackPairsByLevel(mSessionAns);
+
+ // Renegotiate with no changes on either side.
+ std::string reoffer = CreateOffer();
+ SetLocalOffer(reoffer);
+ SetRemoteOffer(reoffer);
+
+ added = mSessionAns.GetRemoteTracksAdded();
+ removed = mSessionAns.GetRemoteTracksRemoved();
+ ASSERT_EQ(0U, added.size());
+ ASSERT_EQ(0U, removed.size());
+
+ std::string reanswer = CreateAnswer();
+ SetLocalAnswer(reanswer);
+ SetRemoteAnswer(reanswer);
+
+ added = mSessionOff.GetRemoteTracksAdded();
+ removed = mSessionOff.GetRemoteTracksRemoved();
+ ASSERT_EQ(0U, added.size());
+ ASSERT_EQ(0U, removed.size());
+
+ // Every pair must be unchanged by the no-op renegotiation.
+ auto newOffererPairs = GetTrackPairsByLevel(mSessionOff);
+ auto newAnswererPairs = GetTrackPairsByLevel(mSessionAns);
+
+ ASSERT_EQ(offererPairs.size(), newOffererPairs.size());
+ for (size_t i = 0; i < offererPairs.size(); ++i) {
+ ASSERT_TRUE(Equals(offererPairs[i], newOffererPairs[i]));
+ }
+
+ ASSERT_EQ(answererPairs.size(), newAnswererPairs.size());
+ for (size_t i = 0; i < answererPairs.size(); ++i) {
+ ASSERT_TRUE(Equals(answererPairs[i], newAnswererPairs[i]));
+ }
+}
+
+// Renegotiation where the offerer adds one audio and one video track: the
+// answerer must see exactly those two remote tracks added, and all
+// previously negotiated pairs must be preserved unchanged.
+TEST_P(JsepSessionTest, RenegotiationOffererAddsTrack)
+{
+ AddTracks(mSessionOff);
+ AddTracks(mSessionAns);
+
+ OfferAnswer();
+
+ auto offererPairs = GetTrackPairsByLevel(mSessionOff);
+ auto answererPairs = GetTrackPairsByLevel(mSessionAns);
+
+ // Offerer adds an extra audio and an extra video track.
+ std::vector<SdpMediaSection::MediaType> extraTypes;
+ extraTypes.push_back(SdpMediaSection::kAudio);
+ extraTypes.push_back(SdpMediaSection::kVideo);
+ AddTracks(mSessionOff, extraTypes);
+ types.insert(types.end(), extraTypes.begin(), extraTypes.end());
+
+ OfferAnswer(CHECK_SUCCESS);
+
+ // Answerer observes exactly the two new remote tracks, in order.
+ auto added = mSessionAns.GetRemoteTracksAdded();
+ auto removed = mSessionAns.GetRemoteTracksRemoved();
+ ASSERT_EQ(2U, added.size());
+ ASSERT_EQ(0U, removed.size());
+ ASSERT_EQ(SdpMediaSection::kAudio, added[0]->GetMediaType());
+ ASSERT_EQ(SdpMediaSection::kVideo, added[1]->GetMediaType());
+
+ // Offerer's remote view is unchanged (answerer added nothing).
+ added = mSessionOff.GetRemoteTracksAdded();
+ removed = mSessionOff.GetRemoteTracksRemoved();
+ ASSERT_EQ(0U, added.size());
+ ASSERT_EQ(0U, removed.size());
+
+ auto newOffererPairs = GetTrackPairsByLevel(mSessionOff);
+ auto newAnswererPairs = GetTrackPairsByLevel(mSessionAns);
+
+ // Two new pairs appended; all pre-existing pairs unchanged.
+ ASSERT_EQ(offererPairs.size() + 2, newOffererPairs.size());
+ for (size_t i = 0; i < offererPairs.size(); ++i) {
+ ASSERT_TRUE(Equals(offererPairs[i], newOffererPairs[i]));
+ }
+
+ ASSERT_EQ(answererPairs.size() + 2, newAnswererPairs.size());
+ for (size_t i = 0; i < answererPairs.size(); ++i) {
+ ASSERT_TRUE(Equals(answererPairs[i], newAnswererPairs[i]));
+ }
+}
+
+// Renegotiation where the answerer adds one audio and one video track. The
+// offerer must include extra recvonly m-sections (via offerToReceive*) so
+// the answerer has somewhere to put its new tracks.
+TEST_P(JsepSessionTest, RenegotiationAnswererAddsTrack)
+{
+ AddTracks(mSessionOff);
+ AddTracks(mSessionAns);
+
+ OfferAnswer();
+
+ auto offererPairs = GetTrackPairsByLevel(mSessionOff);
+ auto answererPairs = GetTrackPairsByLevel(mSessionAns);
+
+ std::vector<SdpMediaSection::MediaType> extraTypes;
+ extraTypes.push_back(SdpMediaSection::kAudio);
+ extraTypes.push_back(SdpMediaSection::kVideo);
+ AddTracks(mSessionAns, extraTypes);
+ types.insert(types.end(), extraTypes.begin(), extraTypes.end());
+
+ // We need to add a recvonly m-section to the offer for this to work
+ JsepOfferOptions options;
+ options.mOfferToReceiveAudio =
+ Some(GetTrackCount(mSessionOff, SdpMediaSection::kAudio) + 1);
+ options.mOfferToReceiveVideo =
+ Some(GetTrackCount(mSessionOff, SdpMediaSection::kVideo) + 1);
+
+ std::string offer = CreateOffer(Some(options));
+ SetLocalOffer(offer, CHECK_SUCCESS);
+ SetRemoteOffer(offer, CHECK_SUCCESS);
+
+ std::string answer = CreateAnswer();
+ SetLocalAnswer(answer, CHECK_SUCCESS);
+ SetRemoteAnswer(answer, CHECK_SUCCESS);
+
+ // Answerer's remote view is unchanged (offerer added nothing).
+ auto added = mSessionAns.GetRemoteTracksAdded();
+ auto removed = mSessionAns.GetRemoteTracksRemoved();
+ ASSERT_EQ(0U, added.size());
+ ASSERT_EQ(0U, removed.size());
+
+ // Offerer observes exactly the two new remote tracks, in order.
+ added = mSessionOff.GetRemoteTracksAdded();
+ removed = mSessionOff.GetRemoteTracksRemoved();
+ ASSERT_EQ(2U, added.size());
+ ASSERT_EQ(0U, removed.size());
+ ASSERT_EQ(SdpMediaSection::kAudio, added[0]->GetMediaType());
+ ASSERT_EQ(SdpMediaSection::kVideo, added[1]->GetMediaType());
+
+ auto newOffererPairs = GetTrackPairsByLevel(mSessionOff);
+ auto newAnswererPairs = GetTrackPairsByLevel(mSessionAns);
+
+ // Two new pairs appended; all pre-existing pairs unchanged.
+ ASSERT_EQ(offererPairs.size() + 2, newOffererPairs.size());
+ for (size_t i = 0; i < offererPairs.size(); ++i) {
+ ASSERT_TRUE(Equals(offererPairs[i], newOffererPairs[i]));
+ }
+
+ ASSERT_EQ(answererPairs.size() + 2, newAnswererPairs.size());
+ for (size_t i = 0; i < answererPairs.size(); ++i) {
+ ASSERT_TRUE(Equals(answererPairs[i], newAnswererPairs[i]));
+ }
+}
+
+// Renegotiation where BOTH sides add one audio and one video track: each
+// side must see exactly two remote tracks added, and all pre-existing
+// pairs must be preserved unchanged.
+TEST_P(JsepSessionTest, RenegotiationBothAddTrack)
+{
+ AddTracks(mSessionOff);
+ AddTracks(mSessionAns);
+
+ OfferAnswer();
+
+ auto offererPairs = GetTrackPairsByLevel(mSessionOff);
+ auto answererPairs = GetTrackPairsByLevel(mSessionAns);
+
+ std::vector<SdpMediaSection::MediaType> extraTypes;
+ extraTypes.push_back(SdpMediaSection::kAudio);
+ extraTypes.push_back(SdpMediaSection::kVideo);
+ AddTracks(mSessionAns, extraTypes);
+ AddTracks(mSessionOff, extraTypes);
+ types.insert(types.end(), extraTypes.begin(), extraTypes.end());
+
+ OfferAnswer(CHECK_SUCCESS);
+
+ // Both directions see the two new remote tracks, audio before video.
+ auto added = mSessionAns.GetRemoteTracksAdded();
+ auto removed = mSessionAns.GetRemoteTracksRemoved();
+ ASSERT_EQ(2U, added.size());
+ ASSERT_EQ(0U, removed.size());
+ ASSERT_EQ(SdpMediaSection::kAudio, added[0]->GetMediaType());
+ ASSERT_EQ(SdpMediaSection::kVideo, added[1]->GetMediaType());
+
+ added = mSessionOff.GetRemoteTracksAdded();
+ removed = mSessionOff.GetRemoteTracksRemoved();
+ ASSERT_EQ(2U, added.size());
+ ASSERT_EQ(0U, removed.size());
+ ASSERT_EQ(SdpMediaSection::kAudio, added[0]->GetMediaType());
+ ASSERT_EQ(SdpMediaSection::kVideo, added[1]->GetMediaType());
+
+ auto newOffererPairs = GetTrackPairsByLevel(mSessionOff);
+ auto newAnswererPairs = GetTrackPairsByLevel(mSessionAns);
+
+ ASSERT_EQ(offererPairs.size() + 2, newOffererPairs.size());
+ for (size_t i = 0; i < offererPairs.size(); ++i) {
+ ASSERT_TRUE(Equals(offererPairs[i], newOffererPairs[i]));
+ }
+
+ ASSERT_EQ(answererPairs.size() + 2, newAnswererPairs.size());
+ for (size_t i = 0; i < answererPairs.size(); ++i) {
+ ASSERT_TRUE(Equals(answererPairs[i], newAnswererPairs[i]));
+ }
+}
+
+// When both sides add tracks into an ALREADY-EXISTING media stream, the
+// number of unique stream ids must not change, and the first stream id on
+// each side must remain stable across renegotiation.
+TEST_P(JsepSessionTest, RenegotiationBothAddTracksToExistingStream)
+{
+ AddTracks(mSessionOff);
+ AddTracks(mSessionAns);
+ // Datachannel has no media stream to add tracks to; nothing to test.
+ if (GetParam() == "datachannel") {
+ return;
+ }
+
+ OfferAnswer();
+
+ // hasStream is used as the expected unique-stream-id count (0 or 1).
+ auto oHasStream = HasMediaStream(mSessionOff.GetLocalTracks());
+ auto aHasStream = HasMediaStream(mSessionAns.GetLocalTracks());
+ ASSERT_EQ(oHasStream, GetLocalUniqueStreamIds(mSessionOff).size());
+ ASSERT_EQ(aHasStream, GetLocalUniqueStreamIds(mSessionAns).size());
+ ASSERT_EQ(aHasStream, GetRemoteUniqueStreamIds(mSessionOff).size());
+ ASSERT_EQ(oHasStream, GetRemoteUniqueStreamIds(mSessionAns).size());
+
+ auto firstOffId = GetFirstLocalStreamId(mSessionOff);
+ auto firstAnsId = GetFirstLocalStreamId(mSessionAns);
+
+ auto offererPairs = GetTrackPairsByLevel(mSessionOff);
+ auto answererPairs = GetTrackPairsByLevel(mSessionAns);
+
+ // Add the extra tracks into the first existing stream on each side.
+ std::vector<SdpMediaSection::MediaType> extraTypes;
+ extraTypes.push_back(SdpMediaSection::kAudio);
+ extraTypes.push_back(SdpMediaSection::kVideo);
+ AddTracksToStream(mSessionOff, firstOffId, extraTypes);
+ AddTracksToStream(mSessionAns, firstAnsId, extraTypes);
+ types.insert(types.end(), extraTypes.begin(), extraTypes.end());
+
+ OfferAnswer(CHECK_SUCCESS);
+
+ oHasStream = HasMediaStream(mSessionOff.GetLocalTracks());
+ aHasStream = HasMediaStream(mSessionAns.GetLocalTracks());
+
+ ASSERT_EQ(oHasStream, GetLocalUniqueStreamIds(mSessionOff).size());
+ ASSERT_EQ(aHasStream, GetLocalUniqueStreamIds(mSessionAns).size());
+ ASSERT_EQ(aHasStream, GetRemoteUniqueStreamIds(mSessionOff).size());
+ ASSERT_EQ(oHasStream, GetRemoteUniqueStreamIds(mSessionAns).size());
+ // Stream ids must be stable across the renegotiation.
+ if (oHasStream) {
+ ASSERT_STREQ(firstOffId.c_str(),
+ GetFirstLocalStreamId(mSessionOff).c_str());
+ }
+ if (aHasStream) {
+ ASSERT_STREQ(firstAnsId.c_str(),
+ GetFirstLocalStreamId(mSessionAns).c_str());
+ }
+}
+
+// Renegotiation where the offerer removes its first track: the answerer
+// must see exactly that track removed, the affected m-section flips to
+// recvonly (offer) / sendonly (answer), and the only pair-level change is
+// the offerer's dropped sending track.
+TEST_P(JsepSessionTest, RenegotiationOffererRemovesTrack)
+{
+ AddTracks(mSessionOff);
+ AddTracks(mSessionAns);
+ // Datachannel m-sections don't carry removable tracks; skip.
+ if (types.front() == SdpMediaSection::kApplication) {
+ return;
+ }
+
+ OfferAnswer();
+
+ auto offererPairs = GetTrackPairsByLevel(mSessionOff);
+ auto answererPairs = GetTrackPairsByLevel(mSessionAns);
+
+ RefPtr<JsepTrack> removedTrack = GetTrackOff(0, types.front());
+ ASSERT_TRUE(removedTrack);
+ ASSERT_EQ(NS_OK, mSessionOff.RemoveTrack(removedTrack->GetStreamId(),
+ removedTrack->GetTrackId()));
+
+ OfferAnswer(CHECK_SUCCESS);
+
+ // Answerer sees exactly the removed track, identified by type/ids.
+ auto added = mSessionAns.GetRemoteTracksAdded();
+ auto removed = mSessionAns.GetRemoteTracksRemoved();
+ ASSERT_EQ(0U, added.size());
+ ASSERT_EQ(1U, removed.size());
+
+ ASSERT_EQ(removedTrack->GetMediaType(), removed[0]->GetMediaType());
+ ASSERT_EQ(removedTrack->GetStreamId(), removed[0]->GetStreamId());
+ ASSERT_EQ(removedTrack->GetTrackId(), removed[0]->GetTrackId());
+
+ added = mSessionOff.GetRemoteTracksAdded();
+ removed = mSessionOff.GetRemoteTracksRemoved();
+ ASSERT_EQ(0U, added.size());
+ ASSERT_EQ(0U, removed.size());
+
+ // First m-section should be recvonly
+ auto offer = GetParsedLocalDescription(mSessionOff);
+ auto* msection = GetMsection(*offer, types.front(), 0);
+ ASSERT_TRUE(msection);
+ ASSERT_TRUE(msection->IsReceiving());
+ ASSERT_FALSE(msection->IsSending());
+
+ // First audio m-section should be sendonly
+ auto answer = GetParsedLocalDescription(mSessionAns);
+ msection = GetMsection(*answer, types.front(), 0);
+ ASSERT_TRUE(msection);
+ ASSERT_FALSE(msection->IsReceiving());
+ ASSERT_TRUE(msection->IsSending());
+
+ auto newOffererPairs = GetTrackPairsByLevel(mSessionOff);
+ auto newAnswererPairs = GetTrackPairsByLevel(mSessionAns);
+
+ // Will be the same size since we still have a track on one side.
+ ASSERT_EQ(offererPairs.size(), newOffererPairs.size());
+
+ // This should be the only difference.
+ ASSERT_TRUE(offererPairs[0].mSending);
+ ASSERT_FALSE(newOffererPairs[0].mSending);
+
+ // Remove this difference, let loop below take care of the rest
+ offererPairs[0].mSending = nullptr;
+ for (size_t i = 0; i < offererPairs.size(); ++i) {
+ ASSERT_TRUE(Equals(offererPairs[i], newOffererPairs[i]));
+ }
+
+ // Will be the same size since we still have a track on one side.
+ ASSERT_EQ(answererPairs.size(), newAnswererPairs.size());
+
+ // This should be the only difference.
+ ASSERT_TRUE(answererPairs[0].mReceiving);
+ ASSERT_FALSE(newAnswererPairs[0].mReceiving);
+
+ // Remove this difference, let loop below take care of the rest
+ answererPairs[0].mReceiving = nullptr;
+ for (size_t i = 0; i < answererPairs.size(); ++i) {
+ ASSERT_TRUE(Equals(answererPairs[i], newAnswererPairs[i]));
+ }
+}
+
+// Mirror of the previous test: the ANSWERER removes its first track. The
+// offerer must see exactly that track removed, the offer m-section stays
+// sendrecv while the answer flips to recvonly, and the only pair-level
+// change is the answerer's dropped sending track.
+TEST_P(JsepSessionTest, RenegotiationAnswererRemovesTrack)
+{
+ AddTracks(mSessionOff);
+ AddTracks(mSessionAns);
+ // Datachannel m-sections don't carry removable tracks; skip.
+ if (types.front() == SdpMediaSection::kApplication) {
+ return;
+ }
+
+ OfferAnswer();
+
+ auto offererPairs = GetTrackPairsByLevel(mSessionOff);
+ auto answererPairs = GetTrackPairsByLevel(mSessionAns);
+
+ RefPtr<JsepTrack> removedTrack = GetTrackAns(0, types.front());
+ ASSERT_TRUE(removedTrack);
+ ASSERT_EQ(NS_OK, mSessionAns.RemoveTrack(removedTrack->GetStreamId(),
+ removedTrack->GetTrackId()));
+
+ OfferAnswer(CHECK_SUCCESS);
+
+ auto added = mSessionAns.GetRemoteTracksAdded();
+ auto removed = mSessionAns.GetRemoteTracksRemoved();
+ ASSERT_EQ(0U, added.size());
+ ASSERT_EQ(0U, removed.size());
+
+ // Offerer sees exactly the removed track, identified by type/ids.
+ added = mSessionOff.GetRemoteTracksAdded();
+ removed = mSessionOff.GetRemoteTracksRemoved();
+ ASSERT_EQ(0U, added.size());
+ ASSERT_EQ(1U, removed.size());
+
+ ASSERT_EQ(removedTrack->GetMediaType(), removed[0]->GetMediaType());
+ ASSERT_EQ(removedTrack->GetStreamId(), removed[0]->GetStreamId());
+ ASSERT_EQ(removedTrack->GetTrackId(), removed[0]->GetTrackId());
+
+ // First m-section should be sendrecv
+ auto offer = GetParsedLocalDescription(mSessionOff);
+ auto* msection = GetMsection(*offer, types.front(), 0);
+ ASSERT_TRUE(msection);
+ ASSERT_TRUE(msection->IsReceiving());
+ ASSERT_TRUE(msection->IsSending());
+
+ // First audio m-section should be recvonly
+ auto answer = GetParsedLocalDescription(mSessionAns);
+ msection = GetMsection(*answer, types.front(), 0);
+ ASSERT_TRUE(msection);
+ ASSERT_TRUE(msection->IsReceiving());
+ ASSERT_FALSE(msection->IsSending());
+
+ auto newOffererPairs = GetTrackPairsByLevel(mSessionOff);
+ auto newAnswererPairs = GetTrackPairsByLevel(mSessionAns);
+
+ // Will be the same size since we still have a track on one side.
+ ASSERT_EQ(offererPairs.size(), newOffererPairs.size());
+
+ // This should be the only difference.
+ ASSERT_TRUE(offererPairs[0].mReceiving);
+ ASSERT_FALSE(newOffererPairs[0].mReceiving);
+
+ // Remove this difference, let loop below take care of the rest
+ offererPairs[0].mReceiving = nullptr;
+ for (size_t i = 0; i < offererPairs.size(); ++i) {
+ ASSERT_TRUE(Equals(offererPairs[i], newOffererPairs[i]));
+ }
+
+ // Will be the same size since we still have a track on one side.
+ ASSERT_EQ(answererPairs.size(), newAnswererPairs.size());
+
+ // This should be the only difference.
+ ASSERT_TRUE(answererPairs[0].mSending);
+ ASSERT_FALSE(newAnswererPairs[0].mSending);
+
+ // Remove this difference, let loop below take care of the rest
+ answererPairs[0].mSending = nullptr;
+ for (size_t i = 0; i < answererPairs.size(); ++i) {
+ ASSERT_TRUE(Equals(answererPairs[i], newAnswererPairs[i]));
+ }
+}
+
+// Both sides remove the track in the first m-section: each side sees one
+// remote removal, the answer disables the m-section entirely (inactive,
+// port 0), and the remaining pair shifts its bundle level from 0 to 1.
+TEST_P(JsepSessionTest, RenegotiationBothRemoveTrack)
+{
+ AddTracks(mSessionOff);
+ AddTracks(mSessionAns);
+ // Datachannel m-sections don't carry removable tracks; skip.
+ if (types.front() == SdpMediaSection::kApplication) {
+ return;
+ }
+
+ OfferAnswer();
+
+ auto offererPairs = GetTrackPairsByLevel(mSessionOff);
+ auto answererPairs = GetTrackPairsByLevel(mSessionAns);
+
+ RefPtr<JsepTrack> removedTrackAnswer = GetTrackAns(0, types.front());
+ ASSERT_TRUE(removedTrackAnswer);
+ ASSERT_EQ(NS_OK, mSessionAns.RemoveTrack(removedTrackAnswer->GetStreamId(),
+ removedTrackAnswer->GetTrackId()));
+
+ RefPtr<JsepTrack> removedTrackOffer = GetTrackOff(0, types.front());
+ ASSERT_TRUE(removedTrackOffer);
+ ASSERT_EQ(NS_OK, mSessionOff.RemoveTrack(removedTrackOffer->GetStreamId(),
+ removedTrackOffer->GetTrackId()));
+
+ OfferAnswer(CHECK_SUCCESS);
+
+ auto added = mSessionAns.GetRemoteTracksAdded();
+ auto removed = mSessionAns.GetRemoteTracksRemoved();
+ ASSERT_EQ(0U, added.size());
+ ASSERT_EQ(1U, removed.size());
+
+ ASSERT_EQ(removedTrackOffer->GetMediaType(), removed[0]->GetMediaType());
+ ASSERT_EQ(removedTrackOffer->GetStreamId(), removed[0]->GetStreamId());
+ ASSERT_EQ(removedTrackOffer->GetTrackId(), removed[0]->GetTrackId());
+
+ added = mSessionOff.GetRemoteTracksAdded();
+ removed = mSessionOff.GetRemoteTracksRemoved();
+ ASSERT_EQ(0U, added.size());
+ ASSERT_EQ(1U, removed.size());
+
+ ASSERT_EQ(removedTrackAnswer->GetMediaType(), removed[0]->GetMediaType());
+ ASSERT_EQ(removedTrackAnswer->GetStreamId(), removed[0]->GetStreamId());
+ ASSERT_EQ(removedTrackAnswer->GetTrackId(), removed[0]->GetTrackId());
+
+ // First m-section should be recvonly
+ auto offer = GetParsedLocalDescription(mSessionOff);
+ auto* msection = GetMsection(*offer, types.front(), 0);
+ ASSERT_TRUE(msection);
+ ASSERT_TRUE(msection->IsReceiving());
+ ASSERT_FALSE(msection->IsSending());
+
+ // First m-section should be inactive, and rejected
+ auto answer = GetParsedLocalDescription(mSessionAns);
+ msection = GetMsection(*answer, types.front(), 0);
+ ASSERT_TRUE(msection);
+ ASSERT_FALSE(msection->IsReceiving());
+ ASSERT_FALSE(msection->IsSending());
+ ASSERT_FALSE(msection->GetPort());
+
+ auto newOffererPairs = GetTrackPairsByLevel(mSessionOff);
+ auto newAnswererPairs = GetTrackPairsByLevel(mSessionAns);
+
+ // One fewer pair on each side; the survivors keep the same tracks but
+ // their bundle level moves from the disabled level 0 to level 1.
+ ASSERT_EQ(offererPairs.size(), newOffererPairs.size() + 1);
+
+ for (size_t i = 0; i < newOffererPairs.size(); ++i) {
+ JsepTrackPair oldPair(offererPairs[i + 1]);
+ JsepTrackPair newPair(newOffererPairs[i]);
+ ASSERT_EQ(oldPair.mLevel, newPair.mLevel);
+ ASSERT_EQ(oldPair.mSending.get(), newPair.mSending.get());
+ ASSERT_EQ(oldPair.mReceiving.get(), newPair.mReceiving.get());
+ ASSERT_TRUE(oldPair.mBundleLevel.isSome());
+ ASSERT_TRUE(newPair.mBundleLevel.isSome());
+ ASSERT_EQ(0U, *oldPair.mBundleLevel);
+ ASSERT_EQ(1U, *newPair.mBundleLevel);
+ }
+
+ ASSERT_EQ(answererPairs.size(), newAnswererPairs.size() + 1);
+
+ for (size_t i = 0; i < newAnswererPairs.size(); ++i) {
+ JsepTrackPair oldPair(answererPairs[i + 1]);
+ JsepTrackPair newPair(newAnswererPairs[i]);
+ ASSERT_EQ(oldPair.mLevel, newPair.mLevel);
+ ASSERT_EQ(oldPair.mSending.get(), newPair.mSending.get());
+ ASSERT_EQ(oldPair.mReceiving.get(), newPair.mReceiving.get());
+ ASSERT_TRUE(oldPair.mBundleLevel.isSome());
+ ASSERT_TRUE(newPair.mBundleLevel.isSome());
+ ASSERT_EQ(0U, *oldPair.mBundleLevel);
+ ASSERT_EQ(1U, *newPair.mBundleLevel);
+ }
+}
+
+// Both sides remove a track and then add one of the same type back: the
+// disabled m-section must be recycled (levels stay contiguous, no gaps).
+TEST_P(JsepSessionTest, RenegotiationBothRemoveThenAddTrack)
+{
+ AddTracks(mSessionOff);
+ AddTracks(mSessionAns);
+ // Datachannel m-sections don't carry removable tracks; skip.
+ if (types.front() == SdpMediaSection::kApplication) {
+ return;
+ }
+
+ SdpMediaSection::MediaType removedType = types.front();
+
+ OfferAnswer();
+
+ RefPtr<JsepTrack> removedTrackAnswer = GetTrackAns(0, removedType);
+ ASSERT_TRUE(removedTrackAnswer);
+ ASSERT_EQ(NS_OK, mSessionAns.RemoveTrack(removedTrackAnswer->GetStreamId(),
+ removedTrackAnswer->GetTrackId()));
+
+ RefPtr<JsepTrack> removedTrackOffer = GetTrackOff(0, removedType);
+ ASSERT_TRUE(removedTrackOffer);
+ ASSERT_EQ(NS_OK, mSessionOff.RemoveTrack(removedTrackOffer->GetStreamId(),
+ removedTrackOffer->GetTrackId()));
+
+ // First renegotiation: the m-section gets disabled on both sides.
+ OfferAnswer(CHECK_SUCCESS);
+
+ auto offererPairs = GetTrackPairsByLevel(mSessionOff);
+ auto answererPairs = GetTrackPairsByLevel(mSessionAns);
+
+ // Add a replacement track of the same type on both sides.
+ std::vector<SdpMediaSection::MediaType> extraTypes;
+ extraTypes.push_back(removedType);
+ AddTracks(mSessionAns, extraTypes);
+ AddTracks(mSessionOff, extraTypes);
+ types.insert(types.end(), extraTypes.begin(), extraTypes.end());
+
+ // Second renegotiation: the new track should reuse the disabled slot.
+ OfferAnswer(CHECK_SUCCESS);
+
+ auto added = mSessionAns.GetRemoteTracksAdded();
+ auto removed = mSessionAns.GetRemoteTracksRemoved();
+ ASSERT_EQ(1U, added.size());
+ ASSERT_EQ(0U, removed.size());
+ ASSERT_EQ(removedType, added[0]->GetMediaType());
+
+ added = mSessionOff.GetRemoteTracksAdded();
+ removed = mSessionOff.GetRemoteTracksRemoved();
+ ASSERT_EQ(1U, added.size());
+ ASSERT_EQ(0U, removed.size());
+ ASSERT_EQ(removedType, added[0]->GetMediaType());
+
+ auto newOffererPairs = GetTrackPairsByLevel(mSessionOff);
+ auto newAnswererPairs = GetTrackPairsByLevel(mSessionAns);
+
+ ASSERT_EQ(offererPairs.size() + 1, newOffererPairs.size());
+ ASSERT_EQ(answererPairs.size() + 1, newAnswererPairs.size());
+
+ // Ensure that the m-section was re-used; no gaps
+ for (size_t i = 0; i < newOffererPairs.size(); ++i) {
+ ASSERT_EQ(i, newOffererPairs[i].mLevel);
+ }
+ for (size_t i = 0; i < newAnswererPairs.size(); ++i) {
+ ASSERT_EQ(i, newAnswererPairs[i].mLevel);
+ }
+}
+
+// Both sides remove a track, but from DIFFERENT m-sections of the same
+// media type (answerer removes from the first, offerer from the second).
+// Each side must see the other's removal, both affected m-sections end up
+// recvonly, and those are the only pair-level differences.
+TEST_P(JsepSessionTest, RenegotiationBothRemoveTrackDifferentMsection)
+{
+ AddTracks(mSessionOff);
+ AddTracks(mSessionAns);
+ // Datachannel m-sections don't carry removable tracks; skip.
+ if (types.front() == SdpMediaSection::kApplication) {
+ return;
+ }
+
+ if (types.size() < 2 || types[0] != types[1]) {
+ // For simplicity, just run in cases where we have two of the same type
+ return;
+ }
+
+ OfferAnswer();
+
+ auto offererPairs = GetTrackPairsByLevel(mSessionOff);
+ auto answererPairs = GetTrackPairsByLevel(mSessionAns);
+
+ RefPtr<JsepTrack> removedTrackAnswer = GetTrackAns(0, types.front());
+ ASSERT_TRUE(removedTrackAnswer);
+ ASSERT_EQ(NS_OK, mSessionAns.RemoveTrack(removedTrackAnswer->GetStreamId(),
+ removedTrackAnswer->GetTrackId()));
+
+ // Second instance of the same type
+ RefPtr<JsepTrack> removedTrackOffer = GetTrackOff(1, types.front());
+ ASSERT_TRUE(removedTrackOffer);
+ ASSERT_EQ(NS_OK, mSessionOff.RemoveTrack(removedTrackOffer->GetStreamId(),
+ removedTrackOffer->GetTrackId()));
+
+ OfferAnswer(CHECK_SUCCESS);
+
+ auto added = mSessionAns.GetRemoteTracksAdded();
+ auto removed = mSessionAns.GetRemoteTracksRemoved();
+ ASSERT_EQ(0U, added.size());
+ ASSERT_EQ(1U, removed.size());
+
+ ASSERT_EQ(removedTrackOffer->GetMediaType(), removed[0]->GetMediaType());
+ ASSERT_EQ(removedTrackOffer->GetStreamId(), removed[0]->GetStreamId());
+ ASSERT_EQ(removedTrackOffer->GetTrackId(), removed[0]->GetTrackId());
+
+ added = mSessionOff.GetRemoteTracksAdded();
+ removed = mSessionOff.GetRemoteTracksRemoved();
+ ASSERT_EQ(0U, added.size());
+ ASSERT_EQ(1U, removed.size());
+
+ ASSERT_EQ(removedTrackAnswer->GetMediaType(), removed[0]->GetMediaType());
+ ASSERT_EQ(removedTrackAnswer->GetStreamId(), removed[0]->GetStreamId());
+ ASSERT_EQ(removedTrackAnswer->GetTrackId(), removed[0]->GetTrackId());
+
+ // Second m-section should be recvonly
+ auto offer = GetParsedLocalDescription(mSessionOff);
+ auto* msection = GetMsection(*offer, types.front(), 1);
+ ASSERT_TRUE(msection);
+ ASSERT_TRUE(msection->IsReceiving());
+ ASSERT_FALSE(msection->IsSending());
+
+ // First m-section should be recvonly
+ auto answer = GetParsedLocalDescription(mSessionAns);
+ msection = GetMsection(*answer, types.front(), 0);
+ ASSERT_TRUE(msection);
+ ASSERT_TRUE(msection->IsReceiving());
+ ASSERT_FALSE(msection->IsSending());
+
+ auto newOffererPairs = GetTrackPairsByLevel(mSessionOff);
+ auto newAnswererPairs = GetTrackPairsByLevel(mSessionAns);
+
+ ASSERT_EQ(offererPairs.size(), newOffererPairs.size());
+
+ // This should be the only difference.
+ ASSERT_TRUE(offererPairs[0].mReceiving);
+ ASSERT_FALSE(newOffererPairs[0].mReceiving);
+
+ // Remove this difference, let loop below take care of the rest
+ offererPairs[0].mReceiving = nullptr;
+
+ // This should be the only difference.
+ ASSERT_TRUE(offererPairs[1].mSending);
+ ASSERT_FALSE(newOffererPairs[1].mSending);
+
+ // Remove this difference, let loop below take care of the rest
+ offererPairs[1].mSending = nullptr;
+
+ for (size_t i = 0; i < newOffererPairs.size(); ++i) {
+ ASSERT_TRUE(Equals(offererPairs[i], newOffererPairs[i]));
+ }
+
+ ASSERT_EQ(answererPairs.size(), newAnswererPairs.size());
+
+ // This should be the only difference.
+ ASSERT_TRUE(answererPairs[0].mSending);
+ ASSERT_FALSE(newAnswererPairs[0].mSending);
+
+ // Remove this difference, let loop below take care of the rest
+ answererPairs[0].mSending = nullptr;
+
+ // This should be the only difference.
+ ASSERT_TRUE(answererPairs[1].mReceiving);
+ ASSERT_FALSE(newAnswererPairs[1].mReceiving);
+
+ // Remove this difference, let loop below take care of the rest
+ answererPairs[1].mReceiving = nullptr;
+
+ for (size_t i = 0; i < newAnswererPairs.size(); ++i) {
+ ASSERT_TRUE(Equals(answererPairs[i], newAnswererPairs[i]));
+ }
+}
+
+// Offerer removes its first track and adds a brand-new one in its place:
+// the answerer must see one removal and one addition, the m-section stays
+// sendrecv, and only the first pair's stream/track ids change.
+TEST_P(JsepSessionTest, RenegotiationOffererReplacesTrack)
+{
+ AddTracks(mSessionOff);
+ AddTracks(mSessionAns);
+
+ // Datachannel m-sections don't carry removable tracks; skip.
+ if (types.front() == SdpMediaSection::kApplication) {
+ return;
+ }
+
+ OfferAnswer();
+
+ auto offererPairs = GetTrackPairsByLevel(mSessionOff);
+ auto answererPairs = GetTrackPairsByLevel(mSessionAns);
+
+ RefPtr<JsepTrack> removedTrack = GetTrackOff(0, types.front());
+ ASSERT_TRUE(removedTrack);
+ ASSERT_EQ(NS_OK, mSessionOff.RemoveTrack(removedTrack->GetStreamId(),
+ removedTrack->GetTrackId()));
+ // Replacement track with fresh stream/track ids.
+ RefPtr<JsepTrack> addedTrack(
+ new JsepTrack(types.front(), "newstream", "newtrack"));
+ ASSERT_EQ(NS_OK, mSessionOff.AddTrack(addedTrack));
+
+ OfferAnswer(CHECK_SUCCESS);
+
+ auto added = mSessionAns.GetRemoteTracksAdded();
+ auto removed = mSessionAns.GetRemoteTracksRemoved();
+ ASSERT_EQ(1U, added.size());
+ ASSERT_EQ(1U, removed.size());
+
+ ASSERT_EQ(removedTrack->GetMediaType(), removed[0]->GetMediaType());
+ ASSERT_EQ(removedTrack->GetStreamId(), removed[0]->GetStreamId());
+ ASSERT_EQ(removedTrack->GetTrackId(), removed[0]->GetTrackId());
+
+ ASSERT_EQ(addedTrack->GetMediaType(), added[0]->GetMediaType());
+ ASSERT_EQ(addedTrack->GetStreamId(), added[0]->GetStreamId());
+ ASSERT_EQ(addedTrack->GetTrackId(), added[0]->GetTrackId());
+
+ added = mSessionOff.GetRemoteTracksAdded();
+ removed = mSessionOff.GetRemoteTracksRemoved();
+ ASSERT_EQ(0U, added.size());
+ ASSERT_EQ(0U, removed.size());
+
+ // First audio m-section should be sendrecv
+ auto offer = GetParsedLocalDescription(mSessionOff);
+ auto* msection = GetMsection(*offer, types.front(), 0);
+ ASSERT_TRUE(msection);
+ ASSERT_TRUE(msection->IsReceiving());
+ ASSERT_TRUE(msection->IsSending());
+
+ // First audio m-section should be sendrecv
+ auto answer = GetParsedLocalDescription(mSessionAns);
+ msection = GetMsection(*answer, types.front(), 0);
+ ASSERT_TRUE(msection);
+ ASSERT_TRUE(msection->IsReceiving());
+ ASSERT_TRUE(msection->IsSending());
+
+ auto newOffererPairs = GetTrackPairsByLevel(mSessionOff);
+ auto newAnswererPairs = GetTrackPairsByLevel(mSessionAns);
+
+ ASSERT_EQ(offererPairs.size(), newOffererPairs.size());
+
+ ASSERT_NE(offererPairs[0].mSending->GetStreamId(),
+ newOffererPairs[0].mSending->GetStreamId());
+ ASSERT_NE(offererPairs[0].mSending->GetTrackId(),
+ newOffererPairs[0].mSending->GetTrackId());
+
+ // Skip first pair
+ for (size_t i = 1; i < offererPairs.size(); ++i) {
+ ASSERT_TRUE(Equals(offererPairs[i], newOffererPairs[i]));
+ }
+
+ ASSERT_EQ(answererPairs.size(), newAnswererPairs.size());
+
+ ASSERT_NE(answererPairs[0].mReceiving->GetStreamId(),
+ newAnswererPairs[0].mReceiving->GetStreamId());
+ ASSERT_NE(answererPairs[0].mReceiving->GetTrackId(),
+ newAnswererPairs[0].mReceiving->GetTrackId());
+
+ // Skip first pair
+ // NOTE(review): this loop bounds on newAnswererPairs.size() while the
+ // offerer loop above bounds on offererPairs.size(); the sizes are
+ // asserted equal, so behavior matches, but the bound is inconsistent.
+ for (size_t i = 1; i < newAnswererPairs.size(); ++i) {
+ ASSERT_TRUE(Equals(answererPairs[i], newAnswererPairs[i]));
+ }
+}
+
+// Tests whether auto-assigned remote msids (ie; what happens when the other
+// side doesn't use msid attributes) are stable across renegotiation.
+TEST_P(JsepSessionTest, RenegotiationAutoAssignedMsidIsStable)
+{
+ AddTracks(mSessionOff);
+ std::string offer = CreateOffer();
+ SetLocalOffer(offer);
+ SetRemoteOffer(offer);
+ AddTracks(mSessionAns);
+ std::string answer = CreateAnswer();
+ SetLocalAnswer(answer);
+
+ // Strip msid attributes so the offerer must auto-assign remote msids.
+ DisableMsid(&answer);
+
+ SetRemoteAnswer(answer, CHECK_SUCCESS);
+
+ auto offererPairs = GetTrackPairsByLevel(mSessionOff);
+
+ // Make sure that DisableMsid actually worked, since it is kinda hacky
+ auto answererPairs = GetTrackPairsByLevel(mSessionAns);
+ ASSERT_EQ(offererPairs.size(), answererPairs.size());
+ for (size_t i = 0; i < offererPairs.size(); ++i) {
+ ASSERT_TRUE(offererPairs[i].mReceiving);
+ ASSERT_TRUE(answererPairs[i].mSending);
+ // These should not match since we've monkeyed with the msid
+ ASSERT_NE(offererPairs[i].mReceiving->GetStreamId(),
+ answererPairs[i].mSending->GetStreamId());
+ ASSERT_NE(offererPairs[i].mReceiving->GetTrackId(),
+ answererPairs[i].mSending->GetTrackId());
+ }
+
+ // Renegotiate, again without msid in the answer.
+ offer = CreateOffer();
+ SetLocalOffer(offer);
+ SetRemoteOffer(offer);
+ AddTracks(mSessionAns);
+ answer = CreateAnswer();
+ SetLocalAnswer(answer);
+
+ DisableMsid(&answer);
+
+ SetRemoteAnswer(answer, CHECK_SUCCESS);
+
+ auto newOffererPairs = mSessionOff.GetNegotiatedTrackPairs();
+
+ // The auto-assigned ids must be identical across the renegotiation.
+ ASSERT_EQ(offererPairs.size(), newOffererPairs.size());
+ for (size_t i = 0; i < offererPairs.size(); ++i) {
+ ASSERT_TRUE(Equals(offererPairs[i], newOffererPairs[i]));
+ }
+}
+
+// After initial negotiation every audio track has two codecs (payload 109
+// plus telephone-event 101) with DTMF enabled. If the offerer then strips
+// telephone-event from its reoffer, renegotiation must leave each audio
+// track with only codec 109 and DTMF disabled.
+TEST_P(JsepSessionTest, RenegotiationOffererDisablesTelephoneEvent)
+{
+ AddTracks(mSessionOff);
+ AddTracks(mSessionAns);
+ OfferAnswer();
+
+ auto offererPairs = GetTrackPairsByLevel(mSessionOff);
+
+ // check all the audio tracks to make sure they have 2 codecs (109 and 101),
+ // and dtmf is enabled on all audio tracks
+ for (size_t i = 0; i < offererPairs.size(); ++i) {
+ std::vector<JsepTrack*> tracks;
+ tracks.push_back(offererPairs[i].mSending.get());
+ tracks.push_back(offererPairs[i].mReceiving.get());
+ for (JsepTrack *track : tracks) {
+ if (track->GetMediaType() != SdpMediaSection::kAudio) {
+ continue;
+ }
+ const JsepTrackNegotiatedDetails* details = track->GetNegotiatedDetails();
+ ASSERT_EQ(1U, details->GetEncodingCount());
+ const JsepTrackEncoding& encoding = details->GetEncoding(0);
+ ASSERT_EQ(2U, encoding.GetCodecs().size());
+ ASSERT_TRUE(encoding.HasFormat("109"));
+ ASSERT_TRUE(encoding.HasFormat("101"));
+ for (JsepCodecDescription* codec: encoding.GetCodecs()) {
+ ASSERT_TRUE(codec);
+ // we can cast here because we've already checked for audio track
+ JsepAudioCodecDescription *audioCodec =
+ static_cast<JsepAudioCodecDescription*>(codec);
+ ASSERT_TRUE(audioCodec->mDtmfEnabled);
+ }
+ }
+ }
+
+ // Remove telephone-event (payload 101) from the reoffer's m-line,
+ // fmtp, and rtpmap by text substitution.
+ std::string offer = CreateOffer();
+ ReplaceInSdp(&offer, " 109 101 ", " 109 ");
+ ReplaceInSdp(&offer, "a=fmtp:101 0-15\r\n", "");
+ ReplaceInSdp(&offer, "a=rtpmap:101 telephone-event/8000/1\r\n", "");
+ std::cerr << "modified OFFER: " << offer << std::endl;
+
+ SetLocalOffer(offer);
+ SetRemoteOffer(offer);
+ AddTracks(mSessionAns);
+ std::string answer = CreateAnswer();
+ SetLocalAnswer(answer);
+ SetRemoteAnswer(answer);
+
+ auto newOffererPairs = GetTrackPairsByLevel(mSessionOff);
+
+ // check all the audio tracks to make sure they have 1 codec (109),
+ // and dtmf is disabled on all audio tracks
+ for (size_t i = 0; i < newOffererPairs.size(); ++i) {
+ std::vector<JsepTrack*> tracks;
+ tracks.push_back(newOffererPairs[i].mSending.get());
+ tracks.push_back(newOffererPairs[i].mReceiving.get());
+ for (JsepTrack* track : tracks) {
+ if (track->GetMediaType() != SdpMediaSection::kAudio) {
+ continue;
+ }
+ const JsepTrackNegotiatedDetails* details = track->GetNegotiatedDetails();
+ ASSERT_EQ(1U, details->GetEncodingCount());
+ const JsepTrackEncoding& encoding = details->GetEncoding(0);
+ ASSERT_EQ(1U, encoding.GetCodecs().size());
+ ASSERT_TRUE(encoding.HasFormat("109"));
+ // we can cast here because we've already checked for audio track
+ JsepAudioCodecDescription *audioCodec =
+ static_cast<JsepAudioCodecDescription*>(encoding.GetCodecs()[0]);
+ ASSERT_TRUE(audioCodec);
+ ASSERT_FALSE(audioCodec->mDtmfEnabled);
+ }
+ }
+}
+
+// Tests behavior when the answerer does not use msid in the initial exchange,
+// but does on renegotiation.
+TEST_P(JsepSessionTest, RenegotiationAnswererEnablesMsid)
+{
+ AddTracks(mSessionOff);
+ std::string offer = CreateOffer();
+ SetLocalOffer(offer);
+ SetRemoteOffer(offer);
+ AddTracks(mSessionAns);
+ std::string answer = CreateAnswer();
+ SetLocalAnswer(answer);
+
+ // Initial answer has msid stripped, forcing auto-assigned remote ids.
+ DisableMsid(&answer);
+
+ SetRemoteAnswer(answer, CHECK_SUCCESS);
+
+ auto offererPairs = GetTrackPairsByLevel(mSessionOff);
+
+ // Renegotiate WITH msid in the answer this time.
+ offer = CreateOffer();
+ SetLocalOffer(offer);
+ SetRemoteOffer(offer);
+ AddTracks(mSessionAns);
+ answer = CreateAnswer();
+ SetLocalAnswer(answer);
+ SetRemoteAnswer(answer, CHECK_SUCCESS);
+
+ auto newOffererPairs = mSessionOff.GetNegotiatedTrackPairs();
+
+ ASSERT_EQ(offererPairs.size(), newOffererPairs.size());
+ for (size_t i = 0; i < offererPairs.size(); ++i) {
+ ASSERT_EQ(offererPairs[i].mReceiving->GetMediaType(),
+ newOffererPairs[i].mReceiving->GetMediaType());
+
+ // Sending tracks and transports are untouched by the msid change.
+ ASSERT_EQ(offererPairs[i].mSending, newOffererPairs[i].mSending);
+ ASSERT_TRUE(Equals(offererPairs[i].mRtpTransport,
+ newOffererPairs[i].mRtpTransport));
+ ASSERT_TRUE(Equals(offererPairs[i].mRtcpTransport,
+ newOffererPairs[i].mRtcpTransport));
+
+ if (offererPairs[i].mReceiving->GetMediaType() ==
+ SdpMediaSection::kApplication) {
+ ASSERT_EQ(offererPairs[i].mReceiving, newOffererPairs[i].mReceiving);
+ } else {
+ // This should be the only difference
+ ASSERT_NE(offererPairs[i].mReceiving, newOffererPairs[i].mReceiving);
+ }
+ }
+}
+
+// Tests behavior when the answerer uses msid in the initial exchange, but
+// drops it on renegotiation (mirror image of the previous test).
+TEST_P(JsepSessionTest, RenegotiationAnswererDisablesMsid)
+{
+  AddTracks(mSessionOff);
+  std::string offer = CreateOffer();
+  SetLocalOffer(offer);
+  SetRemoteOffer(offer);
+  AddTracks(mSessionAns);
+  std::string answer = CreateAnswer();
+  SetLocalAnswer(answer);
+  SetRemoteAnswer(answer, CHECK_SUCCESS);
+
+  auto offererPairs = GetTrackPairsByLevel(mSessionOff);
+
+  offer = CreateOffer();
+  SetLocalOffer(offer);
+  SetRemoteOffer(offer);
+  AddTracks(mSessionAns);
+  answer = CreateAnswer();
+  SetLocalAnswer(answer);
+
+  // Munge msid out of the renegotiated answer only.
+  DisableMsid(&answer);
+
+  SetRemoteAnswer(answer, CHECK_SUCCESS);
+
+  auto newOffererPairs = mSessionOff.GetNegotiatedTrackPairs();
+
+  ASSERT_EQ(offererPairs.size(), newOffererPairs.size());
+  for (size_t i = 0; i < offererPairs.size(); ++i) {
+    ASSERT_EQ(offererPairs[i].mReceiving->GetMediaType(),
+              newOffererPairs[i].mReceiving->GetMediaType());
+
+    // Sending side and transports are unaffected by the remote msid change.
+    ASSERT_EQ(offererPairs[i].mSending, newOffererPairs[i].mSending);
+    ASSERT_TRUE(Equals(offererPairs[i].mRtpTransport,
+                       newOffererPairs[i].mRtpTransport));
+    ASSERT_TRUE(Equals(offererPairs[i].mRtcpTransport,
+                       newOffererPairs[i].mRtcpTransport));
+
+    if (offererPairs[i].mReceiving->GetMediaType() ==
+        SdpMediaSection::kApplication) {
+      ASSERT_EQ(offererPairs[i].mReceiving, newOffererPairs[i].mReceiving);
+    } else {
+      // This should be the only difference
+      ASSERT_NE(offererPairs[i].mReceiving, newOffererPairs[i].mReceiving);
+    }
+  }
+}
+
+// Tests behavior when offerer does not use bundle on the initial offer/answer,
+// but does on renegotiation.
+TEST_P(JsepSessionTest, RenegotiationOffererEnablesBundle)
+{
+  AddTracks(mSessionOff);
+  AddTracks(mSessionAns);
+
+  if (types.size() < 2) {
+    // No bundle will happen here.
+    return;
+  }
+
+  std::string offer = CreateOffer();
+
+  // Munge the bundle group out of the initial offer.
+  DisableBundle(&offer);
+
+  SetLocalOffer(offer);
+  SetRemoteOffer(offer);
+  std::string answer = CreateAnswer();
+  SetLocalAnswer(answer);
+  SetRemoteAnswer(answer);
+
+  auto offererPairs = GetTrackPairsByLevel(mSessionOff);
+  auto answererPairs = GetTrackPairsByLevel(mSessionAns);
+
+  // Renegotiate without munging; bundle should be negotiated this time.
+  OfferAnswer();
+
+  auto newOffererPairs = GetTrackPairsByLevel(mSessionOff);
+  auto newAnswererPairs = GetTrackPairsByLevel(mSessionAns);
+
+  ASSERT_EQ(newOffererPairs.size(), newAnswererPairs.size());
+  ASSERT_EQ(offererPairs.size(), newOffererPairs.size());
+  ASSERT_EQ(answererPairs.size(), newAnswererPairs.size());
+
+  for (size_t i = 0; i < newOffererPairs.size(); ++i) {
+    // No bundle initially; every level should have had its own transport.
+    ASSERT_FALSE(offererPairs[i].mBundleLevel.isSome());
+    ASSERT_FALSE(answererPairs[i].mBundleLevel.isSome());
+    if (i != 0) {
+      ASSERT_NE(offererPairs[0].mRtpTransport.get(),
+                offererPairs[i].mRtpTransport.get());
+      if (offererPairs[0].mRtcpTransport) {
+        ASSERT_NE(offererPairs[0].mRtcpTransport.get(),
+                  offererPairs[i].mRtcpTransport.get());
+      }
+      ASSERT_NE(answererPairs[0].mRtpTransport.get(),
+                answererPairs[i].mRtpTransport.get());
+      if (answererPairs[0].mRtcpTransport) {
+        ASSERT_NE(answererPairs[0].mRtcpTransport.get(),
+                  answererPairs[i].mRtcpTransport.get());
+      }
+    }
+
+    // Verify that bundle worked after renegotiation; all levels now share
+    // level 0's transports.
+    ASSERT_TRUE(newOffererPairs[i].mBundleLevel.isSome());
+    ASSERT_TRUE(newAnswererPairs[i].mBundleLevel.isSome());
+    ASSERT_EQ(newOffererPairs[0].mRtpTransport.get(),
+              newOffererPairs[i].mRtpTransport.get());
+    ASSERT_EQ(newOffererPairs[0].mRtcpTransport.get(),
+              newOffererPairs[i].mRtcpTransport.get());
+    ASSERT_EQ(newAnswererPairs[0].mRtpTransport.get(),
+              newAnswererPairs[i].mRtpTransport.get());
+    ASSERT_EQ(newAnswererPairs[0].mRtcpTransport.get(),
+              newAnswererPairs[i].mRtcpTransport.get());
+  }
+}
+
+// Tests behavior when the offerer disables the m-section that carries the
+// bundle transport (level 0) on renegotiation; bundle should move to level 1.
+TEST_P(JsepSessionTest, RenegotiationOffererDisablesBundleTransport)
+{
+  AddTracks(mSessionOff);
+  AddTracks(mSessionAns);
+
+  if (types.size() < 2) {
+    return;
+  }
+
+  OfferAnswer();
+
+  auto offererPairs = GetTrackPairsByLevel(mSessionOff);
+  auto answererPairs = GetTrackPairsByLevel(mSessionAns);
+
+  std::string reoffer = CreateOffer();
+
+  // Reject m-section 0 (the previous bundle transport) in the reoffer.
+  DisableMsection(&reoffer, 0);
+
+  SetLocalOffer(reoffer, CHECK_SUCCESS);
+  SetRemoteOffer(reoffer, CHECK_SUCCESS);
+  std::string reanswer = CreateAnswer();
+  SetLocalAnswer(reanswer, CHECK_SUCCESS);
+  SetRemoteAnswer(reanswer, CHECK_SUCCESS);
+
+  auto newOffererPairs = GetTrackPairsByLevel(mSessionOff);
+  auto newAnswererPairs = GetTrackPairsByLevel(mSessionAns);
+
+  // One pair is gone (the disabled level).
+  ASSERT_EQ(newOffererPairs.size(), newAnswererPairs.size());
+  ASSERT_EQ(offererPairs.size(), newOffererPairs.size() + 1);
+  ASSERT_EQ(answererPairs.size(), newAnswererPairs.size() + 1);
+
+  for (size_t i = 0; i < newOffererPairs.size(); ++i) {
+    // Bundle should have re-formed on level 1.
+    ASSERT_TRUE(newOffererPairs[i].mBundleLevel.isSome());
+    ASSERT_TRUE(newAnswererPairs[i].mBundleLevel.isSome());
+    ASSERT_EQ(1U, *newOffererPairs[i].mBundleLevel);
+    ASSERT_EQ(1U, *newAnswererPairs[i].mBundleLevel);
+    ASSERT_EQ(newOffererPairs[0].mRtpTransport.get(),
+              newOffererPairs[i].mRtpTransport.get());
+    ASSERT_EQ(newOffererPairs[0].mRtcpTransport.get(),
+              newOffererPairs[i].mRtcpTransport.get());
+    ASSERT_EQ(newAnswererPairs[0].mRtpTransport.get(),
+              newAnswererPairs[i].mRtpTransport.get());
+    ASSERT_EQ(newAnswererPairs[0].mRtcpTransport.get(),
+              newAnswererPairs[i].mRtcpTransport.get());
+  }
+
+  // The new bundle transport must differ from the old (disabled) one.
+  ASSERT_NE(newOffererPairs[0].mRtpTransport.get(),
+            offererPairs[0].mRtpTransport.get());
+  ASSERT_NE(newAnswererPairs[0].mRtpTransport.get(),
+            answererPairs[0].mRtpTransport.get());
+
+  ASSERT_LE(1U, mSessionOff.GetTransports().size());
+  ASSERT_LE(1U, mSessionAns.GetTransports().size());
+
+  // The disabled level's transport has zero components.
+  ASSERT_EQ(0U, mSessionOff.GetTransports()[0]->mComponents);
+  ASSERT_EQ(0U, mSessionAns.GetTransports()[0]->mComponents);
+}
+
+// Tests behavior when the answerer disables the m-section carrying the bundle
+// transport (level 0) in the reanswer; bundle should move to level 1.
+TEST_P(JsepSessionTest, RenegotiationAnswererDisablesBundleTransport)
+{
+  AddTracks(mSessionOff);
+  AddTracks(mSessionAns);
+
+  if (types.size() < 2) {
+    return;
+  }
+
+  OfferAnswer();
+
+  auto offererPairs = GetTrackPairsByLevel(mSessionOff);
+  auto answererPairs = GetTrackPairsByLevel(mSessionAns);
+
+  std::string reoffer = CreateOffer();
+  SetLocalOffer(reoffer, CHECK_SUCCESS);
+  SetRemoteOffer(reoffer, CHECK_SUCCESS);
+  std::string reanswer = CreateAnswer();
+
+  // Reject m-section 0 in the reanswer rather than the reoffer.
+  DisableMsection(&reanswer, 0);
+
+  SetLocalAnswer(reanswer, CHECK_SUCCESS);
+  SetRemoteAnswer(reanswer, CHECK_SUCCESS);
+
+  auto newOffererPairs = GetTrackPairsByLevel(mSessionOff);
+  auto newAnswererPairs = GetTrackPairsByLevel(mSessionAns);
+
+  // One pair is gone (the disabled level).
+  ASSERT_EQ(newOffererPairs.size(), newAnswererPairs.size());
+  ASSERT_EQ(offererPairs.size(), newOffererPairs.size() + 1);
+  ASSERT_EQ(answererPairs.size(), newAnswererPairs.size() + 1);
+
+  for (size_t i = 0; i < newOffererPairs.size(); ++i) {
+    // Bundle should have re-formed on level 1.
+    ASSERT_TRUE(newOffererPairs[i].mBundleLevel.isSome());
+    ASSERT_TRUE(newAnswererPairs[i].mBundleLevel.isSome());
+    ASSERT_EQ(1U, *newOffererPairs[i].mBundleLevel);
+    ASSERT_EQ(1U, *newAnswererPairs[i].mBundleLevel);
+    ASSERT_EQ(newOffererPairs[0].mRtpTransport.get(),
+              newOffererPairs[i].mRtpTransport.get());
+    ASSERT_EQ(newOffererPairs[0].mRtcpTransport.get(),
+              newOffererPairs[i].mRtcpTransport.get());
+    ASSERT_EQ(newAnswererPairs[0].mRtpTransport.get(),
+              newAnswererPairs[i].mRtpTransport.get());
+    ASSERT_EQ(newAnswererPairs[0].mRtcpTransport.get(),
+              newAnswererPairs[i].mRtcpTransport.get());
+  }
+
+  // The new bundle transport must differ from the old (disabled) one.
+  ASSERT_NE(newOffererPairs[0].mRtpTransport.get(),
+            offererPairs[0].mRtpTransport.get());
+  ASSERT_NE(newAnswererPairs[0].mRtpTransport.get(),
+            answererPairs[0].mRtpTransport.get());
+}
+
+// Verifies that SetLocalDescription rejects SDP containing a media format
+// whose rtpmap entry is not a known codec name.
+TEST_P(JsepSessionTest, ParseRejectsBadMediaFormat)
+{
+  if (GetParam() == "datachannel") {
+    return;
+  }
+  AddTracks(mSessionOff);
+  std::string offer = CreateOffer();
+  UniquePtr<Sdp> munge(Parse(offer));
+  SdpMediaSection& mediaSection = munge->GetMediaSection(0);
+  // Inject a bogus codec entry into the first m-section.
+  mediaSection.AddCodec("75", "DummyFormatVal", 8000, 1);
+  std::string sdpString = munge->ToString();
+  nsresult rv = mSessionOff.SetLocalDescription(kJsepSdpOffer, sdpString);
+  ASSERT_EQ(NS_ERROR_INVALID_ARG, rv);
+}
+
+// Exercises a full offer/answer exchange including ICE candidate gathering and
+// trickling, checking candidate/default-candidate/end-of-candidates placement
+// in each of the four SDP views (local/remote offer, local/remote answer).
+TEST_P(JsepSessionTest, FullCallWithCandidates)
+{
+  AddTracks(mSessionOff);
+  std::string offer = CreateOffer();
+  SetLocalOffer(offer);
+  mOffCandidates.Gather(mSessionOff, types);
+
+  UniquePtr<Sdp> localOffer(Parse(mSessionOff.GetLocalDescription()));
+  for (size_t i = 0; i < localOffer->GetMediaSectionCount(); ++i) {
+    mOffCandidates.CheckRtpCandidates(
+        true, localOffer->GetMediaSection(i), i,
+        "Local offer after gathering should have RTP candidates.");
+    mOffCandidates.CheckDefaultRtpCandidate(
+        true, localOffer->GetMediaSection(i), i,
+        "Local offer after gathering should have a default RTP candidate.");
+    mOffCandidates.CheckRtcpCandidates(
+        types[i] != SdpMediaSection::kApplication,
+        localOffer->GetMediaSection(i), i,
+        "Local offer after gathering should have RTCP candidates "
+        "(unless m=application)");
+    mOffCandidates.CheckDefaultRtcpCandidate(
+        types[i] != SdpMediaSection::kApplication,
+        localOffer->GetMediaSection(i), i,
+        "Local offer after gathering should have a default RTCP candidate "
+        "(unless m=application)");
+    CheckEndOfCandidates(true, localOffer->GetMediaSection(i),
+        "Local offer after gathering should have an end-of-candidates.");
+  }
+
+  SetRemoteOffer(offer);
+  mOffCandidates.Trickle(mSessionAns);
+
+  // The trickled candidates appear in the remote view, but defaults and
+  // end-of-candidates do not (those only show up in a regenerated local SDP).
+  UniquePtr<Sdp> remoteOffer(Parse(mSessionAns.GetRemoteDescription()));
+  for (size_t i = 0; i < remoteOffer->GetMediaSectionCount(); ++i) {
+    mOffCandidates.CheckRtpCandidates(
+        true, remoteOffer->GetMediaSection(i), i,
+        "Remote offer after trickle should have RTP candidates.");
+    mOffCandidates.CheckDefaultRtpCandidate(
+        false, remoteOffer->GetMediaSection(i), i,
+        "Initial remote offer should not have a default RTP candidate.");
+    mOffCandidates.CheckRtcpCandidates(
+        types[i] != SdpMediaSection::kApplication,
+        remoteOffer->GetMediaSection(i), i,
+        "Remote offer after trickle should have RTCP candidates "
+        "(unless m=application)");
+    mOffCandidates.CheckDefaultRtcpCandidate(
+        false, remoteOffer->GetMediaSection(i), i,
+        "Initial remote offer should not have a default RTCP candidate.");
+    CheckEndOfCandidates(false, remoteOffer->GetMediaSection(i),
+        "Initial remote offer should not have an end-of-candidates.");
+  }
+
+  AddTracks(mSessionAns);
+  std::string answer = CreateAnswer();
+  SetLocalAnswer(answer);
+  // This will gather candidates that mSessionAns knows it doesn't need.
+  // They should not be present in the SDP.
+  mAnsCandidates.Gather(mSessionAns, types);
+
+  UniquePtr<Sdp> localAnswer(Parse(mSessionAns.GetLocalDescription()));
+  for (size_t i = 0; i < localAnswer->GetMediaSectionCount(); ++i) {
+    mAnsCandidates.CheckRtpCandidates(
+        i == 0, localAnswer->GetMediaSection(i), i,
+        "Local answer after gathering should have RTP candidates on level 0.");
+    mAnsCandidates.CheckDefaultRtpCandidate(
+        true, localAnswer->GetMediaSection(i), 0,
+        "Local answer after gathering should have a default RTP candidate "
+        "on all levels that matches transport level 0.");
+    mAnsCandidates.CheckRtcpCandidates(
+        false, localAnswer->GetMediaSection(i), i,
+        "Local answer after gathering should not have RTCP candidates "
+        "(because we're answering with rtcp-mux)");
+    mAnsCandidates.CheckDefaultRtcpCandidate(
+        false, localAnswer->GetMediaSection(i), i,
+        "Local answer after gathering should not have a default RTCP candidate "
+        "(because we're answering with rtcp-mux)");
+    CheckEndOfCandidates(i == 0, localAnswer->GetMediaSection(i),
+        "Local answer after gathering should have an end-of-candidates only for"
+        " level 0.");
+  }
+
+  SetRemoteAnswer(answer);
+  mAnsCandidates.Trickle(mSessionOff);
+
+  UniquePtr<Sdp> remoteAnswer(Parse(mSessionOff.GetRemoteDescription()));
+  for (size_t i = 0; i < remoteAnswer->GetMediaSectionCount(); ++i) {
+    mAnsCandidates.CheckRtpCandidates(
+        i == 0, remoteAnswer->GetMediaSection(i), i,
+        "Remote answer after trickle should have RTP candidates on level 0.");
+    mAnsCandidates.CheckDefaultRtpCandidate(
+        false, remoteAnswer->GetMediaSection(i), i,
+        "Remote answer after trickle should not have a default RTP candidate.");
+    mAnsCandidates.CheckRtcpCandidates(
+        false, remoteAnswer->GetMediaSection(i), i,
+        "Remote answer after trickle should not have RTCP candidates "
+        "(because we're answering with rtcp-mux)");
+    mAnsCandidates.CheckDefaultRtcpCandidate(
+        false, remoteAnswer->GetMediaSection(i), i,
+        "Remote answer after trickle should not have a default RTCP "
+        "candidate.");
+    CheckEndOfCandidates(false, remoteAnswer->GetMediaSection(i),
+        "Remote answer after trickle should not have an end-of-candidates.");
+  }
+}
+
+// Exercises ICE candidate gathering/trickling across a renegotiation,
+// verifying which candidates, default candidates, and end-of-candidates
+// markers survive into the reoffer/reanswer in each SDP view.
+TEST_P(JsepSessionTest, RenegotiationWithCandidates)
+{
+  // Initial full exchange with gathering and trickling on both sides.
+  AddTracks(mSessionOff);
+  std::string offer = CreateOffer();
+  SetLocalOffer(offer);
+  mOffCandidates.Gather(mSessionOff, types);
+  SetRemoteOffer(offer);
+  mOffCandidates.Trickle(mSessionAns);
+  AddTracks(mSessionAns);
+  std::string answer = CreateAnswer();
+  SetLocalAnswer(answer);
+  mAnsCandidates.Gather(mSessionAns, types);
+  SetRemoteAnswer(answer);
+  mAnsCandidates.Trickle(mSessionOff);
+
+  offer = CreateOffer();
+  SetLocalOffer(offer);
+
+  // Before re-gathering, only level 0 (the bundle transport) carries over
+  // candidates from the previous negotiation.
+  UniquePtr<Sdp> parsedOffer(Parse(offer));
+  for (size_t i = 0; i < parsedOffer->GetMediaSectionCount(); ++i) {
+    mOffCandidates.CheckRtpCandidates(
+        i == 0, parsedOffer->GetMediaSection(i), i,
+        "Local reoffer before gathering should have RTP candidates on level 0"
+        " only.");
+    mOffCandidates.CheckDefaultRtpCandidate(
+        i == 0, parsedOffer->GetMediaSection(i), 0,
+        "Local reoffer before gathering should have a default RTP candidate "
+        "on level 0 only.");
+    mOffCandidates.CheckRtcpCandidates(
+        false, parsedOffer->GetMediaSection(i), i,
+        "Local reoffer before gathering should not have RTCP candidates.");
+    mOffCandidates.CheckDefaultRtcpCandidate(
+        false, parsedOffer->GetMediaSection(i), i,
+        "Local reoffer before gathering should not have a default RTCP "
+        "candidate.");
+    CheckEndOfCandidates(false, parsedOffer->GetMediaSection(i),
+        "Local reoffer before gathering should not have an end-of-candidates.");
+  }
+
+  // mSessionAns should generate a reoffer that is similar
+  std::string otherOffer;
+  JsepOfferOptions defaultOptions;
+  nsresult rv = mSessionAns.CreateOffer(defaultOptions, &otherOffer);
+  ASSERT_EQ(NS_OK, rv);
+  parsedOffer = Parse(otherOffer);
+  for (size_t i = 0; i < parsedOffer->GetMediaSectionCount(); ++i) {
+    mAnsCandidates.CheckRtpCandidates(
+        i == 0, parsedOffer->GetMediaSection(i), i,
+        "Local reoffer before gathering should have RTP candidates on level 0"
+        " only. (previous answerer)");
+    mAnsCandidates.CheckDefaultRtpCandidate(
+        i == 0, parsedOffer->GetMediaSection(i), 0,
+        "Local reoffer before gathering should have a default RTP candidate "
+        "on level 0 only. (previous answerer)");
+    mAnsCandidates.CheckRtcpCandidates(
+        false, parsedOffer->GetMediaSection(i), i,
+        "Local reoffer before gathering should not have RTCP candidates."
+        " (previous answerer)");
+    mAnsCandidates.CheckDefaultRtcpCandidate(
+        false, parsedOffer->GetMediaSection(i), i,
+        "Local reoffer before gathering should not have a default RTCP "
+        "candidate. (previous answerer)");
+    CheckEndOfCandidates(false, parsedOffer->GetMediaSection(i),
+        "Local reoffer before gathering should not have an end-of-candidates. "
+        "(previous answerer)");
+  }
+
+  // Ok, let's continue with the renegotiation
+  SetRemoteOffer(offer);
+
+  // PeerConnection will not re-gather for RTP, but it will for RTCP in case
+  // the answerer decides to turn off rtcp-mux.
+  if (types[0] != SdpMediaSection::kApplication) {
+    mOffCandidates.Gather(mSessionOff, 0, RTCP);
+  }
+
+  // Since the remaining levels were bundled, PeerConnection will re-gather for
+  // both RTP and RTCP, in case the answerer rejects bundle.
+  for (size_t level = 1; level < types.size(); ++level) {
+    mOffCandidates.Gather(mSessionOff, level, RTP);
+    if (types[level] != SdpMediaSection::kApplication) {
+      mOffCandidates.Gather(mSessionOff, level, RTCP);
+    }
+  }
+  mOffCandidates.FinishGathering(mSessionOff);
+
+  mOffCandidates.Trickle(mSessionAns);
+
+  UniquePtr<Sdp> localOffer(Parse(mSessionOff.GetLocalDescription()));
+  for (size_t i = 0; i < localOffer->GetMediaSectionCount(); ++i) {
+    mOffCandidates.CheckRtpCandidates(
+        true, localOffer->GetMediaSection(i), i,
+        "Local reoffer after gathering should have RTP candidates.");
+    mOffCandidates.CheckDefaultRtpCandidate(
+        true, localOffer->GetMediaSection(i), i,
+        "Local reoffer after gathering should have a default RTP candidate.");
+    mOffCandidates.CheckRtcpCandidates(
+        types[i] != SdpMediaSection::kApplication,
+        localOffer->GetMediaSection(i), i,
+        "Local reoffer after gathering should have RTCP candidates "
+        "(unless m=application)");
+    mOffCandidates.CheckDefaultRtcpCandidate(
+        types[i] != SdpMediaSection::kApplication,
+        localOffer->GetMediaSection(i), i,
+        "Local reoffer after gathering should have a default RTCP candidate "
+        "(unless m=application)");
+    CheckEndOfCandidates(true, localOffer->GetMediaSection(i),
+        "Local reoffer after gathering should have an end-of-candidates.");
+  }
+
+  UniquePtr<Sdp> remoteOffer(Parse(mSessionAns.GetRemoteDescription()));
+  for (size_t i = 0; i < remoteOffer->GetMediaSectionCount(); ++i) {
+    mOffCandidates.CheckRtpCandidates(
+        true, remoteOffer->GetMediaSection(i), i,
+        "Remote reoffer after trickle should have RTP candidates.");
+    mOffCandidates.CheckDefaultRtpCandidate(
+        i == 0, remoteOffer->GetMediaSection(i), i,
+        "Remote reoffer should have a default RTP candidate on level 0 "
+        "(because it was gathered last offer/answer).");
+    mOffCandidates.CheckRtcpCandidates(
+        types[i] != SdpMediaSection::kApplication,
+        remoteOffer->GetMediaSection(i), i,
+        "Remote reoffer after trickle should have RTCP candidates.");
+    mOffCandidates.CheckDefaultRtcpCandidate(
+        false, remoteOffer->GetMediaSection(i), i,
+        "Remote reoffer should not have a default RTCP candidate.");
+    CheckEndOfCandidates(false, remoteOffer->GetMediaSection(i),
+        "Remote reoffer should not have an end-of-candidates.");
+  }
+
+  answer = CreateAnswer();
+  SetLocalAnswer(answer);
+  SetRemoteAnswer(answer);
+  // No candidates should be gathered at the answerer, but default candidates
+  // should be set.
+  mAnsCandidates.FinishGathering(mSessionAns);
+
+  UniquePtr<Sdp> localAnswer(Parse(mSessionAns.GetLocalDescription()));
+  for (size_t i = 0; i < localAnswer->GetMediaSectionCount(); ++i) {
+    mAnsCandidates.CheckRtpCandidates(
+        i == 0, localAnswer->GetMediaSection(i), i,
+        "Local reanswer after gathering should have RTP candidates on level "
+        "0.");
+    mAnsCandidates.CheckDefaultRtpCandidate(
+        true, localAnswer->GetMediaSection(i), 0,
+        "Local reanswer after gathering should have a default RTP candidate "
+        "on all levels that matches transport level 0.");
+    mAnsCandidates.CheckRtcpCandidates(
+        false, localAnswer->GetMediaSection(i), i,
+        "Local reanswer after gathering should not have RTCP candidates "
+        "(because we're reanswering with rtcp-mux)");
+    mAnsCandidates.CheckDefaultRtcpCandidate(
+        false, localAnswer->GetMediaSection(i), i,
+        "Local reanswer after gathering should not have a default RTCP "
+        "candidate (because we're reanswering with rtcp-mux)");
+    CheckEndOfCandidates(i == 0, localAnswer->GetMediaSection(i),
+        "Local reanswer after gathering should have an end-of-candidates only "
+        "for level 0.");
+  }
+
+  UniquePtr<Sdp> remoteAnswer(Parse(mSessionOff.GetRemoteDescription()));
+  // Fix: iterate over remoteAnswer's own section count (was a copy-paste of
+  // localAnswer's count from the loop above; the counts happen to match, but
+  // the bound must belong to the SDP actually being indexed).
+  for (size_t i = 0; i < remoteAnswer->GetMediaSectionCount(); ++i) {
+    mAnsCandidates.CheckRtpCandidates(
+        i == 0, remoteAnswer->GetMediaSection(i), i,
+        "Remote reanswer after trickle should have RTP candidates on level 0.");
+    mAnsCandidates.CheckDefaultRtpCandidate(
+        i == 0, remoteAnswer->GetMediaSection(i), i,
+        "Remote reanswer should have a default RTP candidate on level 0 "
+        "(because it was gathered last offer/answer).");
+    mAnsCandidates.CheckRtcpCandidates(
+        false, remoteAnswer->GetMediaSection(i), i,
+        "Remote reanswer after trickle should not have RTCP candidates "
+        "(because we're reanswering with rtcp-mux)");
+    mAnsCandidates.CheckDefaultRtcpCandidate(
+        false, remoteAnswer->GetMediaSection(i), i,
+        "Remote reanswer after trickle should not have a default RTCP "
+        "candidate.");
+    CheckEndOfCandidates(false, remoteAnswer->GetMediaSection(i),
+        "Remote reanswer after trickle should not have an end-of-candidates.");
+  }
+}
+
+// Tests that when the answerer munges its reanswer to sendonly (stops
+// receiving), the offerer's local send tracks become inactive, while track
+// pairs for every level are still negotiated.
+TEST_P(JsepSessionTest, RenegotiationAnswererSendonly)
+{
+  AddTracks(mSessionOff);
+  AddTracks(mSessionAns);
+  OfferAnswer();
+
+  std::string offer = CreateOffer();
+  SetLocalOffer(offer);
+  SetRemoteOffer(offer);
+  std::string answer = CreateAnswer();
+  SetLocalAnswer(answer);
+
+  // Munge every non-datachannel m-section in the reanswer to stop receiving.
+  UniquePtr<Sdp> parsedAnswer(Parse(answer));
+  for (size_t i = 0; i < parsedAnswer->GetMediaSectionCount(); ++i) {
+    SdpMediaSection& msection = parsedAnswer->GetMediaSection(i);
+    if (msection.GetMediaType() != SdpMediaSection::kApplication) {
+      msection.SetReceiving(false);
+    }
+  }
+
+  answer = parsedAnswer->ToString();
+
+  SetRemoteAnswer(answer);
+
+  for (const RefPtr<JsepTrack>& track : mSessionOff.GetLocalTracks()) {
+    if (track->GetMediaType() != SdpMediaSection::kApplication) {
+      ASSERT_FALSE(track->GetActive());
+    }
+  }
+
+  ASSERT_EQ(types.size(), mSessionOff.GetNegotiatedTrackPairs().size());
+}
+
+// Tests that when the answerer munges its reanswer to inactive (neither
+// sending nor receiving), the offerer's local send tracks become inactive but
+// track pairs for every level are still negotiated.
+TEST_P(JsepSessionTest, RenegotiationAnswererInactive)
+{
+  AddTracks(mSessionOff);
+  AddTracks(mSessionAns);
+  OfferAnswer();
+
+  std::string offer = CreateOffer();
+  SetLocalOffer(offer);
+  SetRemoteOffer(offer);
+  std::string answer = CreateAnswer();
+  SetLocalAnswer(answer);
+
+  // Munge every non-datachannel m-section in the reanswer to inactive.
+  UniquePtr<Sdp> parsedAnswer(Parse(answer));
+  for (size_t i = 0; i < parsedAnswer->GetMediaSectionCount(); ++i) {
+    SdpMediaSection& msection = parsedAnswer->GetMediaSection(i);
+    if (msection.GetMediaType() != SdpMediaSection::kApplication) {
+      msection.SetReceiving(false);
+      msection.SetSending(false);
+    }
+  }
+
+  answer = parsedAnswer->ToString();
+
+  SetRemoteAnswer(answer, CHECK_SUCCESS); // Won't have answerer tracks
+
+  for (const RefPtr<JsepTrack>& track : mSessionOff.GetLocalTracks()) {
+    if (track->GetMediaType() != SdpMediaSection::kApplication) {
+      ASSERT_FALSE(track->GetActive());
+    }
+  }
+
+  ASSERT_EQ(types.size(), mSessionOff.GetNegotiatedTrackPairs().size());
+}
+
+
+// Run every TEST_P above against these comma-separated media-type lists; the
+// fixture splits each string into the |types| vector used by AddTracks.
+INSTANTIATE_TEST_CASE_P(
+    Variants,
+    JsepSessionTest,
+    ::testing::Values("audio",
+                      "video",
+                      "datachannel",
+                      "audio,video",
+                      "video,audio",
+                      "audio,datachannel",
+                      "video,datachannel",
+                      "video,audio,datachannel",
+                      "audio,video,datachannel",
+                      "datachannel,audio",
+                      "datachannel,video",
+                      "datachannel,audio,video",
+                      "datachannel,video,audio",
+                      "audio,datachannel,video",
+                      "video,datachannel,audio",
+                      "audio,audio",
+                      "video,video",
+                      "audio,audio,video",
+                      "audio,video,video",
+                      "audio,audio,video,video",
+                      "audio,audio,video,video,datachannel"));
+
+// offerToReceiveXxx variants
+
+// Verifies that offerToReceiveAudio/Video produce recvonly m-sections (with
+// ssrc and rtcp-mux), that the answerer responds sendonly/inactive as
+// appropriate, and that each negotiated pair's recvonly ssrc matches the one
+// advertised in the offer.
+TEST_F(JsepSessionTest, OfferAnswerRecvOnlyLines)
+{
+  JsepOfferOptions options;
+  options.mOfferToReceiveAudio = Some(static_cast<size_t>(1U));
+  options.mOfferToReceiveVideo = Some(static_cast<size_t>(2U));
+  options.mDontOfferDataChannel = Some(true);
+  std::string offer = CreateOffer(Some(options));
+
+  UniquePtr<Sdp> parsedOffer(Parse(offer));
+  ASSERT_TRUE(!!parsedOffer);
+
+  // 1 audio + 2 video recvonly sections, each carrying an ssrc attribute.
+  ASSERT_EQ(3U, parsedOffer->GetMediaSectionCount());
+  ASSERT_EQ(SdpMediaSection::kAudio,
+            parsedOffer->GetMediaSection(0).GetMediaType());
+  ASSERT_EQ(SdpDirectionAttribute::kRecvonly,
+            parsedOffer->GetMediaSection(0).GetAttributeList().GetDirection());
+  ASSERT_TRUE(parsedOffer->GetMediaSection(0).GetAttributeList().HasAttribute(
+        SdpAttribute::kSsrcAttribute));
+
+  ASSERT_EQ(SdpMediaSection::kVideo,
+            parsedOffer->GetMediaSection(1).GetMediaType());
+  ASSERT_EQ(SdpDirectionAttribute::kRecvonly,
+            parsedOffer->GetMediaSection(1).GetAttributeList().GetDirection());
+  ASSERT_TRUE(parsedOffer->GetMediaSection(1).GetAttributeList().HasAttribute(
+        SdpAttribute::kSsrcAttribute));
+
+  ASSERT_EQ(SdpMediaSection::kVideo,
+            parsedOffer->GetMediaSection(2).GetMediaType());
+  ASSERT_EQ(SdpDirectionAttribute::kRecvonly,
+            parsedOffer->GetMediaSection(2).GetAttributeList().GetDirection());
+  ASSERT_TRUE(parsedOffer->GetMediaSection(2).GetAttributeList().HasAttribute(
+        SdpAttribute::kSsrcAttribute));
+
+  ASSERT_TRUE(parsedOffer->GetMediaSection(0).GetAttributeList().HasAttribute(
+        SdpAttribute::kRtcpMuxAttribute));
+  ASSERT_TRUE(parsedOffer->GetMediaSection(1).GetAttributeList().HasAttribute(
+        SdpAttribute::kRtcpMuxAttribute));
+  ASSERT_TRUE(parsedOffer->GetMediaSection(2).GetAttributeList().HasAttribute(
+        SdpAttribute::kRtcpMuxAttribute));
+
+  SetLocalOffer(offer, CHECK_SUCCESS);
+
+  // Answerer only has one track of each kind, so the second video section
+  // should come back inactive.
+  AddTracks(mSessionAns, "audio,video");
+  SetRemoteOffer(offer, CHECK_SUCCESS);
+
+  std::string answer = CreateAnswer();
+  UniquePtr<Sdp> parsedAnswer(Parse(answer));
+
+  ASSERT_EQ(3U, parsedAnswer->GetMediaSectionCount());
+  ASSERT_EQ(SdpMediaSection::kAudio,
+            parsedAnswer->GetMediaSection(0).GetMediaType());
+  ASSERT_EQ(SdpDirectionAttribute::kSendonly,
+            parsedAnswer->GetMediaSection(0).GetAttributeList().GetDirection());
+  ASSERT_EQ(SdpMediaSection::kVideo,
+            parsedAnswer->GetMediaSection(1).GetMediaType());
+  ASSERT_EQ(SdpDirectionAttribute::kSendonly,
+            parsedAnswer->GetMediaSection(1).GetAttributeList().GetDirection());
+  ASSERT_EQ(SdpMediaSection::kVideo,
+            parsedAnswer->GetMediaSection(2).GetMediaType());
+  ASSERT_EQ(SdpDirectionAttribute::kInactive,
+            parsedAnswer->GetMediaSection(2).GetAttributeList().GetDirection());
+
+  SetLocalAnswer(answer, CHECK_SUCCESS);
+  SetRemoteAnswer(answer, CHECK_SUCCESS);
+
+  std::vector<JsepTrackPair> trackPairs(mSessionOff.GetNegotiatedTrackPairs());
+  ASSERT_EQ(2U, trackPairs.size());
+  // Fix: iterate by const reference — JsepTrackPair holds RefPtrs, so the
+  // old by-value loop copied the pair (refcount churn) on every iteration.
+  for (const auto& pair : trackPairs) {
+    auto ssrcs = parsedOffer->GetMediaSection(pair.mLevel).GetAttributeList()
+                 .GetSsrc().mSsrcs;
+    ASSERT_EQ(1U, ssrcs.size());
+    ASSERT_EQ(pair.mRecvonlySsrc, ssrcs.front().ssrc);
+  }
+}
+
+// Verifies sendonly/sendrecv directions when the offerer has more tracks than
+// it offers to receive: offerToReceiveAudio=0 and offerToReceiveVideo=1 with
+// one audio and two video tracks yields sendonly/sendrecv/sendonly, and the
+// answerer (with one track of each kind) responds recvonly/sendrecv/recvonly.
+TEST_F(JsepSessionTest, OfferAnswerSendOnlyLines)
+{
+  AddTracks(mSessionOff, "audio,video,video");
+
+  JsepOfferOptions options;
+  options.mOfferToReceiveAudio = Some(static_cast<size_t>(0U));
+  options.mOfferToReceiveVideo = Some(static_cast<size_t>(1U));
+  options.mDontOfferDataChannel = Some(true);
+  std::string offer = CreateOffer(Some(options));
+
+  UniquePtr<Sdp> outputSdp(Parse(offer));
+  ASSERT_TRUE(!!outputSdp);
+
+  ASSERT_EQ(3U, outputSdp->GetMediaSectionCount());
+  ASSERT_EQ(SdpMediaSection::kAudio,
+            outputSdp->GetMediaSection(0).GetMediaType());
+  ASSERT_EQ(SdpDirectionAttribute::kSendonly,
+            outputSdp->GetMediaSection(0).GetAttributeList().GetDirection());
+  ASSERT_EQ(SdpMediaSection::kVideo,
+            outputSdp->GetMediaSection(1).GetMediaType());
+  ASSERT_EQ(SdpDirectionAttribute::kSendrecv,
+            outputSdp->GetMediaSection(1).GetAttributeList().GetDirection());
+  ASSERT_EQ(SdpMediaSection::kVideo,
+            outputSdp->GetMediaSection(2).GetMediaType());
+  ASSERT_EQ(SdpDirectionAttribute::kSendonly,
+            outputSdp->GetMediaSection(2).GetAttributeList().GetDirection());
+
+  ASSERT_TRUE(outputSdp->GetMediaSection(0).GetAttributeList().HasAttribute(
+        SdpAttribute::kRtcpMuxAttribute));
+  ASSERT_TRUE(outputSdp->GetMediaSection(1).GetAttributeList().HasAttribute(
+        SdpAttribute::kRtcpMuxAttribute));
+  ASSERT_TRUE(outputSdp->GetMediaSection(2).GetAttributeList().HasAttribute(
+        SdpAttribute::kRtcpMuxAttribute));
+
+  SetLocalOffer(offer, CHECK_SUCCESS);
+
+  AddTracks(mSessionAns, "audio,video");
+  SetRemoteOffer(offer, CHECK_SUCCESS);
+
+  std::string answer = CreateAnswer();
+  outputSdp = Parse(answer);
+
+  ASSERT_EQ(3U, outputSdp->GetMediaSectionCount());
+  ASSERT_EQ(SdpMediaSection::kAudio,
+            outputSdp->GetMediaSection(0).GetMediaType());
+  ASSERT_EQ(SdpDirectionAttribute::kRecvonly,
+            outputSdp->GetMediaSection(0).GetAttributeList().GetDirection());
+  ASSERT_EQ(SdpMediaSection::kVideo,
+            outputSdp->GetMediaSection(1).GetMediaType());
+  ASSERT_EQ(SdpDirectionAttribute::kSendrecv,
+            outputSdp->GetMediaSection(1).GetAttributeList().GetDirection());
+  ASSERT_EQ(SdpMediaSection::kVideo,
+            outputSdp->GetMediaSection(2).GetMediaType());
+  ASSERT_EQ(SdpDirectionAttribute::kRecvonly,
+            outputSdp->GetMediaSection(2).GetAttributeList().GetDirection());
+}
+
+// Verifies that a recvonly audio offer (offerToReceiveAudio=1 with no local
+// tracks) is answered inactive when the answerer has nothing to send either.
+TEST_F(JsepSessionTest, OfferToReceiveAudioNotUsed)
+{
+  JsepOfferOptions options;
+  options.mOfferToReceiveAudio = Some<size_t>(1);
+
+  OfferAnswer(CHECK_SUCCESS, Some(options));
+
+  UniquePtr<Sdp> offer(Parse(mSessionOff.GetLocalDescription()));
+  ASSERT_TRUE(offer.get());
+  ASSERT_EQ(1U, offer->GetMediaSectionCount());
+  ASSERT_EQ(SdpMediaSection::kAudio,
+            offer->GetMediaSection(0).GetMediaType());
+  ASSERT_EQ(SdpDirectionAttribute::kRecvonly,
+            offer->GetMediaSection(0).GetAttributeList().GetDirection());
+
+  UniquePtr<Sdp> answer(Parse(mSessionAns.GetLocalDescription()));
+  ASSERT_TRUE(answer.get());
+  ASSERT_EQ(1U, answer->GetMediaSectionCount());
+  ASSERT_EQ(SdpMediaSection::kAudio,
+            answer->GetMediaSection(0).GetMediaType());
+  ASSERT_EQ(SdpDirectionAttribute::kInactive,
+            answer->GetMediaSection(0).GetAttributeList().GetDirection());
+}
+
+// Verifies that a recvonly video offer (offerToReceiveVideo=1 with no local
+// tracks) is answered inactive when the answerer has nothing to send either.
+TEST_F(JsepSessionTest, OfferToReceiveVideoNotUsed)
+{
+  JsepOfferOptions options;
+  options.mOfferToReceiveVideo = Some<size_t>(1);
+
+  OfferAnswer(CHECK_SUCCESS, Some(options));
+
+  UniquePtr<Sdp> offer(Parse(mSessionOff.GetLocalDescription()));
+  ASSERT_TRUE(offer.get());
+  ASSERT_EQ(1U, offer->GetMediaSectionCount());
+  ASSERT_EQ(SdpMediaSection::kVideo,
+            offer->GetMediaSection(0).GetMediaType());
+  ASSERT_EQ(SdpDirectionAttribute::kRecvonly,
+            offer->GetMediaSection(0).GetAttributeList().GetDirection());
+
+  UniquePtr<Sdp> answer(Parse(mSessionAns.GetLocalDescription()));
+  ASSERT_TRUE(answer.get());
+  ASSERT_EQ(1U, answer->GetMediaSectionCount());
+  ASSERT_EQ(SdpMediaSection::kVideo,
+            answer->GetMediaSection(0).GetMediaType());
+  ASSERT_EQ(SdpDirectionAttribute::kInactive,
+            answer->GetMediaSection(0).GetAttributeList().GetDirection());
+}
+
+// Verifies that an offer with only audio and video tracks does not include a
+// datachannel m-section by default.
+TEST_F(JsepSessionTest, CreateOfferNoDatachannelDefault)
+{
+  RefPtr<JsepTrack> msta(
+      new JsepTrack(SdpMediaSection::kAudio, "offerer_stream", "a1"));
+  mSessionOff.AddTrack(msta);
+
+  RefPtr<JsepTrack> mstv1(
+      new JsepTrack(SdpMediaSection::kVideo, "offerer_stream", "v1"));
+  mSessionOff.AddTrack(mstv1);
+
+  std::string offer = CreateOffer();
+
+  UniquePtr<Sdp> outputSdp(Parse(offer));
+  ASSERT_TRUE(!!outputSdp);
+
+  // Exactly the two offered m-sections; no m=application.
+  ASSERT_EQ(2U, outputSdp->GetMediaSectionCount());
+  ASSERT_EQ(SdpMediaSection::kAudio,
+            outputSdp->GetMediaSection(0).GetMediaType());
+  ASSERT_EQ(SdpMediaSection::kVideo,
+            outputSdp->GetMediaSection(1).GetMediaType());
+}
+
+// Validates the full set of video codecs we place in a default offer:
+// payload-type ordering on the m-line, the rtpmap names, and the fmtp
+// parameters for each codec (VP8, VP9, H264 x2, red, ulpfec).
+TEST_F(JsepSessionTest, ValidateOfferedVideoCodecParams)
+{
+  types.push_back(SdpMediaSection::kAudio);
+  types.push_back(SdpMediaSection::kVideo);
+
+  RefPtr<JsepTrack> msta(
+      new JsepTrack(SdpMediaSection::kAudio, "offerer_stream", "a1"));
+  mSessionOff.AddTrack(msta);
+  RefPtr<JsepTrack> mstv1(
+      new JsepTrack(SdpMediaSection::kVideo, "offerer_stream", "v2"));
+  mSessionOff.AddTrack(mstv1);
+
+  std::string offer = CreateOffer();
+
+  UniquePtr<Sdp> outputSdp(Parse(offer));
+  ASSERT_TRUE(!!outputSdp);
+
+  ASSERT_EQ(2U, outputSdp->GetMediaSectionCount());
+  auto& video_section = outputSdp->GetMediaSection(1);
+  ASSERT_EQ(SdpMediaSection::kVideo, video_section.GetMediaType());
+  auto& video_attrs = video_section.GetAttributeList();
+  ASSERT_EQ(SdpDirectionAttribute::kSendrecv, video_attrs.GetDirection());
+
+  // The m-line format list encodes our codec preference order.
+  ASSERT_EQ(6U, video_section.GetFormats().size());
+  ASSERT_EQ("120", video_section.GetFormats()[0]);
+  ASSERT_EQ("121", video_section.GetFormats()[1]);
+  ASSERT_EQ("126", video_section.GetFormats()[2]);
+  ASSERT_EQ("97", video_section.GetFormats()[3]);
+  ASSERT_EQ("122", video_section.GetFormats()[4]);
+  ASSERT_EQ("123", video_section.GetFormats()[5]);
+
+  // Validate rtpmap
+  ASSERT_TRUE(video_attrs.HasAttribute(SdpAttribute::kRtpmapAttribute));
+  auto& rtpmaps = video_attrs.GetRtpmap();
+  ASSERT_TRUE(rtpmaps.HasEntry("120"));
+  ASSERT_TRUE(rtpmaps.HasEntry("121"));
+  ASSERT_TRUE(rtpmaps.HasEntry("126"));
+  ASSERT_TRUE(rtpmaps.HasEntry("97"));
+  ASSERT_TRUE(rtpmaps.HasEntry("122"));
+  ASSERT_TRUE(rtpmaps.HasEntry("123"));
+
+  auto& vp8_entry = rtpmaps.GetEntry("120");
+  auto& vp9_entry = rtpmaps.GetEntry("121");
+  auto& h264_1_entry = rtpmaps.GetEntry("126");
+  auto& h264_0_entry = rtpmaps.GetEntry("97");
+  auto& red_0_entry = rtpmaps.GetEntry("122");
+  auto& ulpfec_0_entry = rtpmaps.GetEntry("123");
+
+  ASSERT_EQ("VP8", vp8_entry.name);
+  ASSERT_EQ("VP9", vp9_entry.name);
+  ASSERT_EQ("H264", h264_1_entry.name);
+  ASSERT_EQ("H264", h264_0_entry.name);
+  ASSERT_EQ("red", red_0_entry.name);
+  ASSERT_EQ("ulpfec", ulpfec_0_entry.name);
+
+  // Validate fmtps
+  ASSERT_TRUE(video_attrs.HasAttribute(SdpAttribute::kFmtpAttribute));
+  auto& fmtps = video_attrs.GetFmtp().mFmtps;
+
+  // Five fmtp lines: VP8, VP9, two H264 payload types, and red
+  // (ulpfec carries no fmtp).
+  ASSERT_EQ(5U, fmtps.size());
+
+  // VP8
+  const SdpFmtpAttributeList::Parameters* vp8_params =
+      video_section.FindFmtp("120");
+  ASSERT_TRUE(vp8_params);
+  ASSERT_EQ(SdpRtpmapAttributeList::kVP8, vp8_params->codec_type);
+
+  auto& parsed_vp8_params =
+      *static_cast<const SdpFmtpAttributeList::VP8Parameters*>(vp8_params);
+
+  ASSERT_EQ((uint32_t)12288, parsed_vp8_params.max_fs);
+  ASSERT_EQ((uint32_t)60, parsed_vp8_params.max_fr);
+
+  // VP9
+  const SdpFmtpAttributeList::Parameters* vp9_params =
+      video_section.FindFmtp("121");
+  ASSERT_TRUE(vp9_params);
+  ASSERT_EQ(SdpRtpmapAttributeList::kVP9, vp9_params->codec_type);
+
+  // NOTE(review): VP9 parameters are cast to VP8Parameters here; the SDP code
+  // appears to reuse that struct for both VPx codecs (codec_type is checked
+  // above to be kVP9) — confirm this cast is intentional.
+  auto& parsed_vp9_params =
+      *static_cast<const SdpFmtpAttributeList::VP8Parameters*>(vp9_params);
+
+  ASSERT_EQ((uint32_t)12288, parsed_vp9_params.max_fs);
+  ASSERT_EQ((uint32_t)60, parsed_vp9_params.max_fr);
+
+  // H264 packetization mode 1
+  const SdpFmtpAttributeList::Parameters* h264_1_params =
+      video_section.FindFmtp("126");
+  ASSERT_TRUE(h264_1_params);
+  ASSERT_EQ(SdpRtpmapAttributeList::kH264, h264_1_params->codec_type);
+
+  auto& parsed_h264_1_params =
+      *static_cast<const SdpFmtpAttributeList::H264Parameters*>(h264_1_params);
+
+  ASSERT_EQ((uint32_t)0x42e00d, parsed_h264_1_params.profile_level_id);
+  ASSERT_TRUE(parsed_h264_1_params.level_asymmetry_allowed);
+  ASSERT_EQ(1U, parsed_h264_1_params.packetization_mode);
+
+  // H264 packetization mode 0
+  const SdpFmtpAttributeList::Parameters* h264_0_params =
+      video_section.FindFmtp("97");
+  ASSERT_TRUE(h264_0_params);
+  ASSERT_EQ(SdpRtpmapAttributeList::kH264, h264_0_params->codec_type);
+
+  auto& parsed_h264_0_params =
+      *static_cast<const SdpFmtpAttributeList::H264Parameters*>(h264_0_params);
+
+  ASSERT_EQ((uint32_t)0x42e00d, parsed_h264_0_params.profile_level_id);
+  ASSERT_TRUE(parsed_h264_0_params.level_asymmetry_allowed);
+  ASSERT_EQ(0U, parsed_h264_0_params.packetization_mode);
+
+  // red
+  const SdpFmtpAttributeList::Parameters* red_params =
+      video_section.FindFmtp("122");
+  ASSERT_TRUE(red_params);
+  ASSERT_EQ(SdpRtpmapAttributeList::kRed, red_params->codec_type);
+
+  // red's fmtp lists the payload types it may encapsulate — everything but
+  // red itself (122).
+  auto& parsed_red_params =
+      *static_cast<const SdpFmtpAttributeList::RedParameters*>(red_params);
+  ASSERT_EQ(5U, parsed_red_params.encodings.size());
+  ASSERT_EQ(120, parsed_red_params.encodings[0]);
+  ASSERT_EQ(121, parsed_red_params.encodings[1]);
+  ASSERT_EQ(126, parsed_red_params.encodings[2]);
+  ASSERT_EQ(97, parsed_red_params.encodings[3]);
+  ASSERT_EQ(123, parsed_red_params.encodings[4]);
+}
+
+// Validates the full set of audio codecs we place in a default offer:
+// payload-type ordering, rtpmap names, and fmtp parameters for opus and
+// telephone-event (the only audio codecs that carry fmtp lines).
+TEST_F(JsepSessionTest, ValidateOfferedAudioCodecParams)
+{
+  types.push_back(SdpMediaSection::kAudio);
+  types.push_back(SdpMediaSection::kVideo);
+
+  RefPtr<JsepTrack> msta(
+      new JsepTrack(SdpMediaSection::kAudio, "offerer_stream", "a1"));
+  mSessionOff.AddTrack(msta);
+  RefPtr<JsepTrack> mstv1(
+      new JsepTrack(SdpMediaSection::kVideo, "offerer_stream", "v2"));
+  mSessionOff.AddTrack(mstv1);
+
+  std::string offer = CreateOffer();
+
+  UniquePtr<Sdp> outputSdp(Parse(offer));
+  ASSERT_TRUE(!!outputSdp);
+
+  ASSERT_EQ(2U, outputSdp->GetMediaSectionCount());
+  auto& audio_section = outputSdp->GetMediaSection(0);
+  ASSERT_EQ(SdpMediaSection::kAudio, audio_section.GetMediaType());
+  auto& audio_attrs = audio_section.GetAttributeList();
+  ASSERT_EQ(SdpDirectionAttribute::kSendrecv, audio_attrs.GetDirection());
+  // Preference order: opus, G722, PCMU, PCMA, telephone-event.
+  ASSERT_EQ(5U, audio_section.GetFormats().size());
+  ASSERT_EQ("109", audio_section.GetFormats()[0]);
+  ASSERT_EQ("9", audio_section.GetFormats()[1]);
+  ASSERT_EQ("0", audio_section.GetFormats()[2]);
+  ASSERT_EQ("8", audio_section.GetFormats()[3]);
+  ASSERT_EQ("101", audio_section.GetFormats()[4]);
+
+  // Validate rtpmap
+  ASSERT_TRUE(audio_attrs.HasAttribute(SdpAttribute::kRtpmapAttribute));
+  auto& rtpmaps = audio_attrs.GetRtpmap();
+  ASSERT_TRUE(rtpmaps.HasEntry("109"));
+  ASSERT_TRUE(rtpmaps.HasEntry("9"));
+  ASSERT_TRUE(rtpmaps.HasEntry("0"));
+  ASSERT_TRUE(rtpmaps.HasEntry("8"));
+  ASSERT_TRUE(rtpmaps.HasEntry("101"));
+
+  auto& opus_entry = rtpmaps.GetEntry("109");
+  auto& g722_entry = rtpmaps.GetEntry("9");
+  auto& pcmu_entry = rtpmaps.GetEntry("0");
+  auto& pcma_entry = rtpmaps.GetEntry("8");
+  auto& telephone_event_entry = rtpmaps.GetEntry("101");
+
+  ASSERT_EQ("opus", opus_entry.name);
+  ASSERT_EQ("G722", g722_entry.name);
+  ASSERT_EQ("PCMU", pcmu_entry.name);
+  ASSERT_EQ("PCMA", pcma_entry.name);
+  ASSERT_EQ("telephone-event", telephone_event_entry.name);
+
+  // Validate fmtps
+  ASSERT_TRUE(audio_attrs.HasAttribute(SdpAttribute::kFmtpAttribute));
+  auto& fmtps = audio_attrs.GetFmtp().mFmtps;
+
+  // Only opus and telephone-event carry fmtp lines.
+  ASSERT_EQ(2U, fmtps.size());
+
+  // opus
+  const SdpFmtpAttributeList::Parameters* opus_params =
+      audio_section.FindFmtp("109");
+  ASSERT_TRUE(opus_params);
+  ASSERT_EQ(SdpRtpmapAttributeList::kOpus, opus_params->codec_type);
+
+  auto& parsed_opus_params =
+      *static_cast<const SdpFmtpAttributeList::OpusParameters*>(opus_params);
+
+  ASSERT_EQ((uint32_t)48000, parsed_opus_params.maxplaybackrate);
+  ASSERT_EQ((uint32_t)1, parsed_opus_params.stereo);
+  ASSERT_EQ((uint32_t)0, parsed_opus_params.useInBandFec);
+
+  // dtmf
+  const SdpFmtpAttributeList::Parameters* dtmf_params =
+      audio_section.FindFmtp("101");
+  ASSERT_TRUE(dtmf_params);
+  ASSERT_EQ(SdpRtpmapAttributeList::kTelephoneEvent, dtmf_params->codec_type);
+
+  auto& parsed_dtmf_params =
+      *static_cast<const SdpFmtpAttributeList::TelephoneEventParameters*>
+          (dtmf_params);
+
+  // Default DTMF tone range.
+  ASSERT_EQ("0-15", parsed_dtmf_params.dtmfTones);
+}
+
+// Validates codec selection in a full offer/answer exchange: the answer must
+// narrow the video section down to the single negotiated codec (VP8), and
+// both sides must end up with exactly one codec per negotiated video track.
+TEST_F(JsepSessionTest, ValidateAnsweredCodecParams)
+{
+  // TODO(bug 1099351): Once fixed, we can allow red in this offer,
+  // which will also cause multiple codecs in answer. For now,
+  // red/ulpfec for video are behind a pref to mitigate potential for
+  // errors.
+  SetCodecEnabled(mSessionOff, "red", false);
+  // Perturb the answerer's H264 config (different profile-level-id, swapped
+  // default payload types) so H264 negotiation is exercised asymmetrically.
+  for (auto i = mSessionAns.Codecs().begin(); i != mSessionAns.Codecs().end();
+       ++i) {
+    auto* codec = *i;
+    if (codec->mName == "H264") {
+      JsepVideoCodecDescription* h264 =
+          static_cast<JsepVideoCodecDescription*>(codec);
+      h264->mProfileLevelId = 0x42a00d;
+      // Switch up the pts
+      if (h264->mDefaultPt == "126") {
+        h264->mDefaultPt = "97";
+      } else {
+        h264->mDefaultPt = "126";
+      }
+    }
+  }
+
+  types.push_back(SdpMediaSection::kAudio);
+  types.push_back(SdpMediaSection::kVideo);
+
+  RefPtr<JsepTrack> msta(
+      new JsepTrack(SdpMediaSection::kAudio, "offerer_stream", "a1"));
+  mSessionOff.AddTrack(msta);
+  RefPtr<JsepTrack> mstv1(
+      new JsepTrack(SdpMediaSection::kVideo, "offerer_stream", "v1"));
+  mSessionOff.AddTrack(mstv1);
+
+  std::string offer = CreateOffer();
+  SetLocalOffer(offer);
+  SetRemoteOffer(offer);
+
+  // Bug fix: previously the offerer's tracks (msta/mstv1) were added to the
+  // answerer session, leaving msta_ans/mstv1_ans unused. Add the answerer's
+  // own tracks instead.
+  RefPtr<JsepTrack> msta_ans(
+      new JsepTrack(SdpMediaSection::kAudio, "answerer_stream", "a1"));
+  mSessionAns.AddTrack(msta_ans);
+  RefPtr<JsepTrack> mstv1_ans(
+      new JsepTrack(SdpMediaSection::kVideo, "answerer_stream", "v1"));
+  mSessionAns.AddTrack(mstv1_ans);
+
+  std::string answer = CreateAnswer();
+
+  UniquePtr<Sdp> outputSdp(Parse(answer));
+  ASSERT_TRUE(!!outputSdp);
+
+  ASSERT_EQ(2U, outputSdp->GetMediaSectionCount());
+  auto& video_section = outputSdp->GetMediaSection(1);
+  ASSERT_EQ(SdpMediaSection::kVideo, video_section.GetMediaType());
+  auto& video_attrs = video_section.GetAttributeList();
+  ASSERT_EQ(SdpDirectionAttribute::kSendrecv, video_attrs.GetDirection());
+
+  // TODO(bug 1099351): Once fixed, this stuff will need to be updated.
+  ASSERT_EQ(1U, video_section.GetFormats().size());
+  // ASSERT_EQ(3U, video_section.GetFormats().size());
+  ASSERT_EQ("120", video_section.GetFormats()[0]);
+  // ASSERT_EQ("121", video_section.GetFormats()[1]);
+  // ASSERT_EQ("126", video_section.GetFormats()[2]);
+  // ASSERT_EQ("97", video_section.GetFormats()[3]);
+
+  // Validate rtpmap
+  ASSERT_TRUE(video_attrs.HasAttribute(SdpAttribute::kRtpmapAttribute));
+  auto& rtpmaps = video_attrs.GetRtpmap();
+  ASSERT_TRUE(rtpmaps.HasEntry("120"));
+  //ASSERT_TRUE(rtpmaps.HasEntry("121"));
+  // ASSERT_TRUE(rtpmaps.HasEntry("126"));
+  // ASSERT_TRUE(rtpmaps.HasEntry("97"));
+
+  auto& vp8_entry = rtpmaps.GetEntry("120");
+  //auto& vp9_entry = rtpmaps.GetEntry("121");
+  // auto& h264_1_entry = rtpmaps.GetEntry("126");
+  // auto& h264_0_entry = rtpmaps.GetEntry("97");
+
+  ASSERT_EQ("VP8", vp8_entry.name);
+  //ASSERT_EQ("VP9", vp9_entry.name);
+  // ASSERT_EQ("H264", h264_1_entry.name);
+  // ASSERT_EQ("H264", h264_0_entry.name);
+
+  // Validate fmtps
+  ASSERT_TRUE(video_attrs.HasAttribute(SdpAttribute::kFmtpAttribute));
+  auto& fmtps = video_attrs.GetFmtp().mFmtps;
+
+  ASSERT_EQ(1U, fmtps.size());
+  // ASSERT_EQ(3U, fmtps.size());
+
+  // VP8
+  ASSERT_EQ("120", fmtps[0].format);
+  ASSERT_TRUE(!!fmtps[0].parameters);
+  ASSERT_EQ(SdpRtpmapAttributeList::kVP8, fmtps[0].parameters->codec_type);
+
+  auto& parsed_vp8_params =
+      *static_cast<const SdpFmtpAttributeList::VP8Parameters*>(
+          fmtps[0].parameters.get());
+
+  ASSERT_EQ((uint32_t)12288, parsed_vp8_params.max_fs);
+  ASSERT_EQ((uint32_t)60, parsed_vp8_params.max_fr);
+
+
+  SetLocalAnswer(answer);
+  SetRemoteAnswer(answer);
+
+  // Each side should have two negotiated pairs (audio, video); the video
+  // pair should carry exactly one codec in each direction.
+  auto offerPairs = mSessionOff.GetNegotiatedTrackPairs();
+  ASSERT_EQ(2U, offerPairs.size());
+  ASSERT_TRUE(offerPairs[1].mSending);
+  ASSERT_TRUE(offerPairs[1].mReceiving);
+  ASSERT_TRUE(offerPairs[1].mSending->GetNegotiatedDetails());
+  ASSERT_TRUE(offerPairs[1].mReceiving->GetNegotiatedDetails());
+  ASSERT_EQ(1U,
+      offerPairs[1].mSending->GetNegotiatedDetails()->GetEncoding(0)
+      .GetCodecs().size());
+  ASSERT_EQ(1U,
+      offerPairs[1].mReceiving->GetNegotiatedDetails()->GetEncoding(0)
+      .GetCodecs().size());
+
+  auto answerPairs = mSessionAns.GetNegotiatedTrackPairs();
+  ASSERT_EQ(2U, answerPairs.size());
+  ASSERT_TRUE(answerPairs[1].mSending);
+  ASSERT_TRUE(answerPairs[1].mReceiving);
+  ASSERT_TRUE(answerPairs[1].mSending->GetNegotiatedDetails());
+  ASSERT_TRUE(answerPairs[1].mReceiving->GetNegotiatedDetails());
+  ASSERT_EQ(1U,
+      answerPairs[1].mSending->GetNegotiatedDetails()->GetEncoding(0)
+      .GetCodecs().size());
+  ASSERT_EQ(1U,
+      answerPairs[1].mReceiving->GetNegotiatedDetails()->GetEncoding(0)
+      .GetCodecs().size());
+
+#if 0
+  // H264 packetization mode 1
+  ASSERT_EQ("126", fmtps[1].format);
+  ASSERT_TRUE(fmtps[1].parameters);
+  ASSERT_EQ(SdpRtpmapAttributeList::kH264, fmtps[1].parameters->codec_type);
+
+  auto& parsed_h264_1_params =
+      *static_cast<const SdpFmtpAttributeList::H264Parameters*>(
+          fmtps[1].parameters.get());
+
+  ASSERT_EQ((uint32_t)0x42a00d, parsed_h264_1_params.profile_level_id);
+  ASSERT_TRUE(parsed_h264_1_params.level_asymmetry_allowed);
+  ASSERT_EQ(1U, parsed_h264_1_params.packetization_mode);
+
+  // H264 packetization mode 0
+  ASSERT_EQ("97", fmtps[2].format);
+  ASSERT_TRUE(fmtps[2].parameters);
+  ASSERT_EQ(SdpRtpmapAttributeList::kH264, fmtps[2].parameters->codec_type);
+
+  auto& parsed_h264_0_params =
+      *static_cast<const SdpFmtpAttributeList::H264Parameters*>(
+          fmtps[2].parameters.get());
+
+  ASSERT_EQ((uint32_t)0x42a00d, parsed_h264_0_params.profile_level_id);
+  ASSERT_TRUE(parsed_h264_0_params.level_asymmetry_allowed);
+  ASSERT_EQ(0U, parsed_h264_0_params.packetization_mode);
+#endif
+}
+
+// Replaces the first occurrence of |toReplace| in |*in| with |with|.
+// Fails the current test if |toReplace| does not occur at all.
+static void
+Replace(const std::string& toReplace,
+        const std::string& with,
+        std::string* in)
+{
+  const size_t where = in->find(toReplace);
+  ASSERT_NE(std::string::npos, where);
+  in->replace(where, toReplace.length(), with);
+}
+
+// Repeatedly applies Replace() until |toReplace| no longer occurs in |*in|.
+// Note: never terminates if |with| itself contains |toReplace|.
+static void ReplaceAll(const std::string& toReplace,
+                       const std::string& with,
+                       std::string* in)
+{
+  for (;;) {
+    if (in->find(toReplace) == std::string::npos) {
+      break;
+    }
+    Replace(toReplace, with, in);
+  }
+}
+
+// Fetches one negotiated codec description out of |session|:
+// pair |pairIndex| -> track for |direction| -> encoding |encodingIndex| ->
+// codec |codecIndex|. On any failed precondition *codecOut stays nullptr
+// (gtest ASSERT_* returns from this void helper).
+static void
+GetCodec(JsepSession& session,
+         size_t pairIndex,
+         sdp::Direction direction,
+         size_t encodingIndex,
+         size_t codecIndex,
+         const JsepCodecDescription** codecOut)
+{
+  *codecOut = nullptr;
+  ASSERT_LT(pairIndex, session.GetNegotiatedTrackPairs().size());
+  // Bug fix: previously this always used .front(), ignoring pairIndex.
+  JsepTrackPair pair(session.GetNegotiatedTrackPairs()[pairIndex]);
+  RefPtr<JsepTrack> track(
+      (direction == sdp::kSend) ? pair.mSending : pair.mReceiving);
+  ASSERT_TRUE(track);
+  ASSERT_TRUE(track->GetNegotiatedDetails());
+  ASSERT_LT(encodingIndex, track->GetNegotiatedDetails()->GetEncodingCount());
+  ASSERT_LT(codecIndex,
+            track->GetNegotiatedDetails()->GetEncoding(encodingIndex)
+            .GetCodecs().size());
+  *codecOut =
+      track->GetNegotiatedDetails()->GetEncoding(encodingIndex)
+      .GetCodecs()[codecIndex];
+}
+
+// Disables every codec in |session| except H264, and pins all H264 codecs'
+// profile-level-id to |profileLevelId|.
+static void
+ForceH264(JsepSession& session, uint32_t profileLevelId)
+{
+  for (auto* codec : session.Codecs()) {
+    if (codec->mName != "H264") {
+      codec->mEnabled = false;
+      continue;
+    }
+    auto* h264 = static_cast<JsepVideoCodecDescription*>(codec);
+    h264->mProfileLevelId = profileLevelId;
+  }
+}
+
+// H264-only negotiation with different levels on each side (offerer 1.1,
+// answerer 1.3, same constrained-baseline profile). With level asymmetry
+// allowed, each side ends up sending at the peer's level and receiving at
+// its own.
+TEST_F(JsepSessionTest, TestH264Negotiation)
+{
+  ForceH264(mSessionOff, 0x42e00b);
+  ForceH264(mSessionAns, 0x42e00d);
+
+  AddTracks(mSessionOff, "video");
+  AddTracks(mSessionAns, "video");
+
+  std::string offer(CreateOffer());
+  SetLocalOffer(offer, CHECK_SUCCESS);
+
+  SetRemoteOffer(offer, CHECK_SUCCESS);
+  std::string answer(CreateAnswer());
+
+  SetRemoteAnswer(answer, CHECK_SUCCESS);
+  SetLocalAnswer(answer, CHECK_SUCCESS);
+
+  // Offerer sends at the answerer's level...
+  const JsepCodecDescription* offererSendCodec;
+  GetCodec(mSessionOff, 0, sdp::kSend, 0, 0, &offererSendCodec);
+  ASSERT_TRUE(offererSendCodec);
+  ASSERT_EQ("H264", offererSendCodec->mName);
+  const JsepVideoCodecDescription* offererVideoSendCodec(
+      static_cast<const JsepVideoCodecDescription*>(offererSendCodec));
+  ASSERT_EQ((uint32_t)0x42e00d, offererVideoSendCodec->mProfileLevelId);
+
+  // ...and receives at its own.
+  const JsepCodecDescription* offererRecvCodec;
+  GetCodec(mSessionOff, 0, sdp::kRecv, 0, 0, &offererRecvCodec);
+  ASSERT_EQ("H264", offererRecvCodec->mName);
+  const JsepVideoCodecDescription* offererVideoRecvCodec(
+      static_cast<const JsepVideoCodecDescription*>(offererRecvCodec));
+  ASSERT_EQ((uint32_t)0x42e00b, offererVideoRecvCodec->mProfileLevelId);
+
+  // Mirror image on the answerer side.
+  const JsepCodecDescription* answererSendCodec;
+  GetCodec(mSessionAns, 0, sdp::kSend, 0, 0, &answererSendCodec);
+  ASSERT_TRUE(answererSendCodec);
+  ASSERT_EQ("H264", answererSendCodec->mName);
+  const JsepVideoCodecDescription* answererVideoSendCodec(
+      static_cast<const JsepVideoCodecDescription*>(answererSendCodec));
+  ASSERT_EQ((uint32_t)0x42e00b, answererVideoSendCodec->mProfileLevelId);
+
+  const JsepCodecDescription* answererRecvCodec;
+  GetCodec(mSessionAns, 0, sdp::kRecv, 0, 0, &answererRecvCodec);
+  ASSERT_EQ("H264", answererRecvCodec->mName);
+  const JsepVideoCodecDescription* answererVideoRecvCodec(
+      static_cast<const JsepVideoCodecDescription*>(answererRecvCodec));
+  ASSERT_EQ((uint32_t)0x42e00d, answererVideoRecvCodec->mProfileLevelId);
+}
+
+// H264-only negotiation with incompatible profiles (0x4200xx vs 0x42e0xx)
+// must fail: no track pairs get negotiated on either side.
+TEST_F(JsepSessionTest, TestH264NegotiationFails)
+{
+  ForceH264(mSessionOff, 0x42000b);
+  ForceH264(mSessionAns, 0x42e00d);
+
+  AddTracks(mSessionOff, "video");
+  AddTracks(mSessionAns, "video");
+
+  std::string offer(CreateOffer());
+  SetLocalOffer(offer, CHECK_SUCCESS);
+
+  SetRemoteOffer(offer, CHECK_SUCCESS);
+  std::string answer(CreateAnswer());
+
+  SetRemoteAnswer(answer, CHECK_SUCCESS);
+  SetLocalAnswer(answer, CHECK_SUCCESS);
+
+  // The signaling steps themselves succeed; the failure shows up as an
+  // empty set of negotiated pairs.
+  ASSERT_EQ(0U, mSessionOff.GetNegotiatedTrackPairs().size());
+  ASSERT_EQ(0U, mSessionAns.GetNegotiatedTrackPairs().size());
+}
+
+// If the offer's H264 fmtp lacks profile-level-id (replaced here with an
+// unknown parameter), the answerer falls back to the default
+// profile-level-id 0x420010 for its send codec.
+TEST_F(JsepSessionTest, TestH264NegotiationOffererDefault)
+{
+  ForceH264(mSessionOff, 0x42000d);
+  ForceH264(mSessionAns, 0x42000d);
+
+  AddTracks(mSessionOff, "video");
+  AddTracks(mSessionAns, "video");
+
+  std::string offer(CreateOffer());
+  SetLocalOffer(offer, CHECK_SUCCESS);
+
+  // Strip profile-level-id from the offer before handing it to the answerer.
+  Replace("profile-level-id=42000d",
+          "some-unknown-param=0",
+          &offer);
+
+  SetRemoteOffer(offer, CHECK_SUCCESS);
+  std::string answer(CreateAnswer());
+
+  SetRemoteAnswer(answer, CHECK_SUCCESS);
+  SetLocalAnswer(answer, CHECK_SUCCESS);
+
+  const JsepCodecDescription* answererSendCodec;
+  GetCodec(mSessionAns, 0, sdp::kSend, 0, 0, &answererSendCodec);
+  ASSERT_TRUE(answererSendCodec);
+  ASSERT_EQ("H264", answererSendCodec->mName);
+  const JsepVideoCodecDescription* answererVideoSendCodec(
+      static_cast<const JsepVideoCodecDescription*>(answererSendCodec));
+  ASSERT_EQ((uint32_t)0x420010, answererVideoSendCodec->mProfileLevelId);
+}
+
+// If the offer carries no H264 fmtp line at all (mangled here to "a=oops"),
+// the answerer uses the default profile-level-id 0x420010 in both
+// directions.
+TEST_F(JsepSessionTest, TestH264NegotiationOffererNoFmtp)
+{
+  ForceH264(mSessionOff, 0x42000d);
+  ForceH264(mSessionAns, 0x42001e);
+
+  AddTracks(mSessionOff, "video");
+  AddTracks(mSessionAns, "video");
+
+  std::string offer(CreateOffer());
+  SetLocalOffer(offer, CHECK_SUCCESS);
+
+  // Turn the fmtp attribute into an unknown one before the answerer sees it.
+  Replace("a=fmtp", "a=oops", &offer);
+
+  SetRemoteOffer(offer, CHECK_SUCCESS);
+  std::string answer(CreateAnswer());
+
+  SetRemoteAnswer(answer, CHECK_SUCCESS);
+  SetLocalAnswer(answer, CHECK_SUCCESS);
+
+  const JsepCodecDescription* answererSendCodec;
+  GetCodec(mSessionAns, 0, sdp::kSend, 0, 0, &answererSendCodec);
+  ASSERT_TRUE(answererSendCodec);
+  ASSERT_EQ("H264", answererSendCodec->mName);
+  const JsepVideoCodecDescription* answererVideoSendCodec(
+      static_cast<const JsepVideoCodecDescription*>(answererSendCodec));
+  ASSERT_EQ((uint32_t)0x420010, answererVideoSendCodec->mProfileLevelId);
+
+  const JsepCodecDescription* answererRecvCodec;
+  GetCodec(mSessionAns, 0, sdp::kRecv, 0, 0, &answererRecvCodec);
+  ASSERT_EQ("H264", answererRecvCodec->mName);
+  const JsepVideoCodecDescription* answererVideoRecvCodec(
+      static_cast<const JsepVideoCodecDescription*>(answererRecvCodec));
+  ASSERT_EQ((uint32_t)0x420010, answererVideoRecvCodec->mProfileLevelId);
+}
+
+// When the offer disallows level asymmetry (forced here by editing the
+// offer) and the offerer has the lower level, the answerer must use that
+// lower level (0x42e00b) in both directions.
+TEST_F(JsepSessionTest, TestH264LevelAsymmetryDisallowedByOffererWithLowLevel)
+{
+  ForceH264(mSessionOff, 0x42e00b);
+  ForceH264(mSessionAns, 0x42e00d);
+
+  AddTracks(mSessionOff, "video");
+  AddTracks(mSessionAns, "video");
+
+  std::string offer(CreateOffer());
+  SetLocalOffer(offer, CHECK_SUCCESS);
+
+  Replace("level-asymmetry-allowed=1",
+          "level-asymmetry-allowed=0",
+          &offer);
+
+  SetRemoteOffer(offer, CHECK_SUCCESS);
+  std::string answer(CreateAnswer());
+
+  SetRemoteAnswer(answer, CHECK_SUCCESS);
+  SetLocalAnswer(answer, CHECK_SUCCESS);
+
+  // Offerer doesn't know about the shenanigans we've pulled here, so will
+  // behave normally, and we test the normal behavior elsewhere.
+
+  const JsepCodecDescription* answererSendCodec;
+  GetCodec(mSessionAns, 0, sdp::kSend, 0, 0, &answererSendCodec);
+  ASSERT_TRUE(answererSendCodec);
+  ASSERT_EQ("H264", answererSendCodec->mName);
+  const JsepVideoCodecDescription* answererVideoSendCodec(
+      static_cast<const JsepVideoCodecDescription*>(answererSendCodec));
+  ASSERT_EQ((uint32_t)0x42e00b, answererVideoSendCodec->mProfileLevelId);
+
+  const JsepCodecDescription* answererRecvCodec;
+  GetCodec(mSessionAns, 0, sdp::kRecv, 0, 0, &answererRecvCodec);
+  ASSERT_EQ("H264", answererRecvCodec->mName);
+  const JsepVideoCodecDescription* answererVideoRecvCodec(
+      static_cast<const JsepVideoCodecDescription*>(answererRecvCodec));
+  ASSERT_EQ((uint32_t)0x42e00b, answererVideoRecvCodec->mProfileLevelId);
+}
+
+// Same as the previous test, but with the offerer holding the HIGHER level:
+// with asymmetry disallowed, both directions still converge on the lower of
+// the two levels (0x42e00b, the answerer's).
+TEST_F(JsepSessionTest, TestH264LevelAsymmetryDisallowedByOffererWithHighLevel)
+{
+  ForceH264(mSessionOff, 0x42e00d);
+  ForceH264(mSessionAns, 0x42e00b);
+
+  AddTracks(mSessionOff, "video");
+  AddTracks(mSessionAns, "video");
+
+  std::string offer(CreateOffer());
+  SetLocalOffer(offer, CHECK_SUCCESS);
+
+  Replace("level-asymmetry-allowed=1",
+          "level-asymmetry-allowed=0",
+          &offer);
+
+  SetRemoteOffer(offer, CHECK_SUCCESS);
+  std::string answer(CreateAnswer());
+
+  SetRemoteAnswer(answer, CHECK_SUCCESS);
+  SetLocalAnswer(answer, CHECK_SUCCESS);
+
+  // Offerer doesn't know about the shenanigans we've pulled here, so will
+  // behave normally, and we test the normal behavior elsewhere.
+
+  const JsepCodecDescription* answererSendCodec;
+  GetCodec(mSessionAns, 0, sdp::kSend, 0, 0, &answererSendCodec);
+  ASSERT_TRUE(answererSendCodec);
+  ASSERT_EQ("H264", answererSendCodec->mName);
+  const JsepVideoCodecDescription* answererVideoSendCodec(
+      static_cast<const JsepVideoCodecDescription*>(answererSendCodec));
+  ASSERT_EQ((uint32_t)0x42e00b, answererVideoSendCodec->mProfileLevelId);
+
+  const JsepCodecDescription* answererRecvCodec;
+  GetCodec(mSessionAns, 0, sdp::kRecv, 0, 0, &answererRecvCodec);
+  ASSERT_EQ("H264", answererRecvCodec->mName);
+  const JsepVideoCodecDescription* answererVideoRecvCodec(
+      static_cast<const JsepVideoCodecDescription*>(answererRecvCodec));
+  ASSERT_EQ((uint32_t)0x42e00b, answererVideoRecvCodec->mProfileLevelId);
+}
+
+// When the ANSWER disallows level asymmetry (forced here by editing the
+// answer) and the answerer has the lower level, the offerer must use that
+// lower level (0x42e00b) in both directions.
+TEST_F(JsepSessionTest, TestH264LevelAsymmetryDisallowedByAnswererWithLowLevel)
+{
+  ForceH264(mSessionOff, 0x42e00d);
+  ForceH264(mSessionAns, 0x42e00b);
+
+  AddTracks(mSessionOff, "video");
+  AddTracks(mSessionAns, "video");
+
+  std::string offer(CreateOffer());
+  SetLocalOffer(offer, CHECK_SUCCESS);
+  SetRemoteOffer(offer, CHECK_SUCCESS);
+  std::string answer(CreateAnswer());
+
+  Replace("level-asymmetry-allowed=1",
+          "level-asymmetry-allowed=0",
+          &answer);
+
+  SetRemoteAnswer(answer, CHECK_SUCCESS);
+  SetLocalAnswer(answer, CHECK_SUCCESS);
+
+  const JsepCodecDescription* offererSendCodec;
+  GetCodec(mSessionOff, 0, sdp::kSend, 0, 0, &offererSendCodec);
+  ASSERT_TRUE(offererSendCodec);
+  ASSERT_EQ("H264", offererSendCodec->mName);
+  const JsepVideoCodecDescription* offererVideoSendCodec(
+      static_cast<const JsepVideoCodecDescription*>(offererSendCodec));
+  ASSERT_EQ((uint32_t)0x42e00b, offererVideoSendCodec->mProfileLevelId);
+
+  const JsepCodecDescription* offererRecvCodec;
+  GetCodec(mSessionOff, 0, sdp::kRecv, 0, 0, &offererRecvCodec);
+  ASSERT_EQ("H264", offererRecvCodec->mName);
+  const JsepVideoCodecDescription* offererVideoRecvCodec(
+      static_cast<const JsepVideoCodecDescription*>(offererRecvCodec));
+  ASSERT_EQ((uint32_t)0x42e00b, offererVideoRecvCodec->mProfileLevelId);
+
+  // Answerer doesn't know we've pulled these shenanigans, it should act as if
+  // it did not set level-asymmetry-required, and we already check that
+  // elsewhere
+}
+
+// Same as the previous test, but with the answerer holding the HIGHER level:
+// with asymmetry disallowed in the answer, the offerer converges on the
+// lower of the two levels (0x42e00b, its own) in both directions.
+TEST_F(JsepSessionTest, TestH264LevelAsymmetryDisallowedByAnswererWithHighLevel)
+{
+  ForceH264(mSessionOff, 0x42e00b);
+  ForceH264(mSessionAns, 0x42e00d);
+
+  AddTracks(mSessionOff, "video");
+  AddTracks(mSessionAns, "video");
+
+  std::string offer(CreateOffer());
+  SetLocalOffer(offer, CHECK_SUCCESS);
+  SetRemoteOffer(offer, CHECK_SUCCESS);
+  std::string answer(CreateAnswer());
+
+  Replace("level-asymmetry-allowed=1",
+          "level-asymmetry-allowed=0",
+          &answer);
+
+  SetRemoteAnswer(answer, CHECK_SUCCESS);
+  SetLocalAnswer(answer, CHECK_SUCCESS);
+
+  const JsepCodecDescription* offererSendCodec;
+  GetCodec(mSessionOff, 0, sdp::kSend, 0, 0, &offererSendCodec);
+  ASSERT_TRUE(offererSendCodec);
+  ASSERT_EQ("H264", offererSendCodec->mName);
+  const JsepVideoCodecDescription* offererVideoSendCodec(
+      static_cast<const JsepVideoCodecDescription*>(offererSendCodec));
+  ASSERT_EQ((uint32_t)0x42e00b, offererVideoSendCodec->mProfileLevelId);
+
+  const JsepCodecDescription* offererRecvCodec;
+  GetCodec(mSessionOff, 0, sdp::kRecv, 0, 0, &offererRecvCodec);
+  ASSERT_EQ("H264", offererRecvCodec->mName);
+  const JsepVideoCodecDescription* offererVideoRecvCodec(
+      static_cast<const JsepVideoCodecDescription*>(offererRecvCodec));
+  ASSERT_EQ((uint32_t)0x42e00b, offererVideoRecvCodec->mProfileLevelId);
+
+  // Answerer doesn't know we've pulled these shenanigans, it should act as if
+  // it did not set level-asymmetry-required, and we already check that
+  // elsewhere
+}
+
+// Parameterized over m-section type combinations: sabotages negotiation for
+// the first media type, then checks that the answer rejects exactly those
+// m-sections (a=inactive, port 0) while the rest negotiate normally.
+TEST_P(JsepSessionTest, TestRejectMline)
+{
+  // We need to do this before adding tracks
+  types = BuildTypes(GetParam());
+  std::sort(types.begin(), types.end());
+
+  switch (types.front()) {
+    case SdpMediaSection::kAudio:
+      // Sabotage audio
+      EnsureNegotiationFailure(types.front(), "opus");
+      break;
+    case SdpMediaSection::kVideo:
+      // Sabotage video
+      EnsureNegotiationFailure(types.front(), "H264");
+      break;
+    case SdpMediaSection::kApplication:
+      // Sabotage datachannel
+      EnsureNegotiationFailure(types.front(), "webrtc-datachannel");
+      break;
+    default:
+      ASSERT_TRUE(false) << "Unknown media type";
+  }
+
+  AddTracks(mSessionOff);
+  AddTracks(mSessionAns);
+
+  std::string offer = CreateOffer();
+  mSessionOff.SetLocalDescription(kJsepSdpOffer, offer);
+  mSessionAns.SetRemoteDescription(kJsepSdpOffer, offer);
+
+  std::string answer = CreateAnswer();
+
+  UniquePtr<Sdp> outputSdp(Parse(answer));
+  ASSERT_TRUE(!!outputSdp);
+
+  ASSERT_NE(0U, outputSdp->GetMediaSectionCount());
+  SdpMediaSection* failed_section = nullptr;
+
+  // Find (one of) the m-section(s) of the sabotaged type in the answer.
+  for (size_t i = 0; i < outputSdp->GetMediaSectionCount(); ++i) {
+    if (outputSdp->GetMediaSection(i).GetMediaType() == types.front()) {
+      failed_section = &outputSdp->GetMediaSection(i);
+    }
+  }
+
+  // The rejected section must still be present, but marked inactive with a
+  // zero port.
+  ASSERT_TRUE(failed_section) << "Failed type was entirely absent from SDP";
+  auto& failed_attrs = failed_section->GetAttributeList();
+  ASSERT_EQ(SdpDirectionAttribute::kInactive, failed_attrs.GetDirection());
+  ASSERT_EQ(0U, failed_section->GetPort());
+
+  mSessionAns.SetLocalDescription(kJsepSdpAnswer, answer);
+  mSessionOff.SetRemoteDescription(kJsepSdpAnswer, answer);
+
+  // Every section of the sabotaged type is rejected; the rest negotiate.
+  size_t numRejected = std::count(types.begin(), types.end(), types.front());
+  size_t numAccepted = types.size() - numRejected;
+
+  ASSERT_EQ(numAccepted, mSessionOff.GetNegotiatedTrackPairs().size());
+  ASSERT_EQ(numAccepted, mSessionAns.GetNegotiatedTrackPairs().size());
+
+  // Transports and local tracks persist even for rejected sections; only the
+  // offerer's remote-track count shrinks to the accepted set.
+  ASSERT_EQ(types.size(), mSessionOff.GetTransports().size());
+  ASSERT_EQ(types.size(), mSessionOff.GetLocalTracks().size());
+  ASSERT_EQ(numAccepted, mSessionOff.GetRemoteTracks().size());
+
+  ASSERT_EQ(types.size(), mSessionAns.GetTransports().size());
+  ASSERT_EQ(types.size(), mSessionAns.GetLocalTracks().size());
+  ASSERT_EQ(types.size(), mSessionAns.GetRemoteTracks().size());
+}
+
+TEST_F(JsepSessionTest, CreateOfferNoMlines)
+{
+ JsepOfferOptions options;
+ std::string offer;
+ nsresult rv = mSessionOff.CreateOffer(options, &offer);
+ ASSERT_NE(NS_OK, rv);
+ ASSERT_NE("", mSessionOff.GetLastError());
+}
+
+// Injects a=ice-lite into the offer at the session level; the answerer must
+// then report the remote as ICE-lite, while the offerer (which never saw an
+// ice-lite answer) must not.
+TEST_F(JsepSessionTest, TestIceLite)
+{
+  AddTracks(mSessionOff, "audio");
+  AddTracks(mSessionAns, "audio");
+  std::string offer = CreateOffer();
+  SetLocalOffer(offer, CHECK_SUCCESS);
+
+  // Re-parse the offer and add a session-level ice-lite flag before handing
+  // it to the answerer.
+  UniquePtr<Sdp> parsedOffer(Parse(offer));
+  parsedOffer->GetAttributeList().SetAttribute(
+      new SdpFlagAttribute(SdpAttribute::kIceLiteAttribute));
+
+  std::ostringstream os;
+  parsedOffer->Serialize(os);
+  SetRemoteOffer(os.str(), CHECK_SUCCESS);
+
+  ASSERT_TRUE(mSessionAns.RemoteIsIceLite());
+  ASSERT_FALSE(mSessionOff.RemoteIsIceLite());
+}
+
+// After a full offer/answer exchange, each side should advertise exactly one
+// ICE option: "trickle".
+TEST_F(JsepSessionTest, TestIceOptions)
+{
+  AddTracks(mSessionOff, "audio");
+  AddTracks(mSessionAns, "audio");
+
+  std::string offer = CreateOffer();
+  SetLocalOffer(offer, CHECK_SUCCESS);
+  SetRemoteOffer(offer, CHECK_SUCCESS);
+
+  std::string answer = CreateAnswer();
+  SetLocalAnswer(answer, CHECK_SUCCESS);
+  SetRemoteAnswer(answer, CHECK_SUCCESS);
+
+  for (JsepSession* session : {&mSessionOff, &mSessionAns}) {
+    ASSERT_EQ(1U, session->GetIceOptions().size());
+    ASSERT_EQ("trickle", session->GetIceOptions()[0]);
+  }
+}
+
+// Checks RTP header extension (a=extmap) negotiation: the offer advertises
+// all configured extensions with its own ids, and the answer echoes only the
+// extensions both sides support, using the ids from the OFFER (not the
+// answerer's local defaults).
+TEST_F(JsepSessionTest, TestExtmap)
+{
+  AddTracks(mSessionOff, "audio");
+  AddTracks(mSessionAns, "audio");
+  // ssrc-audio-level will be extmap 1 for both
+  mSessionOff.AddAudioRtpExtension("foo"); // Default mapping of 2
+  mSessionOff.AddAudioRtpExtension("bar"); // Default mapping of 3
+  mSessionAns.AddAudioRtpExtension("bar"); // Default mapping of 2
+  std::string offer = CreateOffer();
+  SetLocalOffer(offer, CHECK_SUCCESS);
+  SetRemoteOffer(offer, CHECK_SUCCESS);
+  std::string answer = CreateAnswer();
+  SetLocalAnswer(answer, CHECK_SUCCESS);
+  SetRemoteAnswer(answer, CHECK_SUCCESS);
+
+  UniquePtr<Sdp> parsedOffer(Parse(offer));
+  ASSERT_EQ(1U, parsedOffer->GetMediaSectionCount());
+
+  auto& offerMediaAttrs = parsedOffer->GetMediaSection(0).GetAttributeList();
+  ASSERT_TRUE(offerMediaAttrs.HasAttribute(SdpAttribute::kExtmapAttribute));
+  auto& offerExtmap = offerMediaAttrs.GetExtmap().mExtmaps;
+  ASSERT_EQ(3U, offerExtmap.size());
+  ASSERT_EQ("urn:ietf:params:rtp-hdrext:ssrc-audio-level",
+      offerExtmap[0].extensionname);
+  ASSERT_EQ(1U, offerExtmap[0].entry);
+  ASSERT_EQ("foo", offerExtmap[1].extensionname);
+  ASSERT_EQ(2U, offerExtmap[1].entry);
+  ASSERT_EQ("bar", offerExtmap[2].extensionname);
+  ASSERT_EQ(3U, offerExtmap[2].entry);
+
+  UniquePtr<Sdp> parsedAnswer(Parse(answer));
+  ASSERT_EQ(1U, parsedAnswer->GetMediaSectionCount());
+
+  auto& answerMediaAttrs = parsedAnswer->GetMediaSection(0).GetAttributeList();
+  ASSERT_TRUE(answerMediaAttrs.HasAttribute(SdpAttribute::kExtmapAttribute));
+  auto& answerExtmap = answerMediaAttrs.GetExtmap().mExtmaps;
+  ASSERT_EQ(1U, answerExtmap.size());
+  // We ensure that the entry for "bar" matches what was in the offer
+  ASSERT_EQ("bar", answerExtmap[0].extensionname);
+  ASSERT_EQ(3U, answerExtmap[0].entry);
+}
+
+// Injects a wildcard "a=rtcp-fb:* nack" into the offer; after negotiation,
+// every negotiated video codec on the answerer's remote track should carry
+// that single nack feedback type (with an empty parameter).
+TEST_F(JsepSessionTest, TestRtcpFbStar)
+{
+  AddTracks(mSessionOff, "video");
+  AddTracks(mSessionAns, "video");
+
+  std::string offer = CreateOffer();
+
+  // Replace the offer's rtcp-fb attributes with a single wildcard entry.
+  UniquePtr<Sdp> parsedOffer(Parse(offer));
+  auto* rtcpfbs = new SdpRtcpFbAttributeList;
+  rtcpfbs->PushEntry("*", SdpRtcpFbAttributeList::kNack);
+  parsedOffer->GetMediaSection(0).GetAttributeList().SetAttribute(rtcpfbs);
+  offer = parsedOffer->ToString();
+
+  SetLocalOffer(offer, CHECK_SUCCESS);
+  SetRemoteOffer(offer, CHECK_SUCCESS);
+  std::string answer = CreateAnswer();
+  SetLocalAnswer(answer, CHECK_SUCCESS);
+  SetRemoteAnswer(answer, CHECK_SUCCESS);
+
+  ASSERT_EQ(1U, mSessionAns.GetRemoteTracks().size());
+  RefPtr<JsepTrack> track = mSessionAns.GetRemoteTracks()[0];
+  ASSERT_TRUE(track->GetNegotiatedDetails());
+  auto* details = track->GetNegotiatedDetails();
+  for (const JsepCodecDescription* codec :
+       details->GetEncoding(0).GetCodecs()) {
+    const JsepVideoCodecDescription* videoCodec =
+        static_cast<const JsepVideoCodecDescription*>(codec);
+    // The wildcard applies to every codec; the nack parameter is empty.
+    ASSERT_EQ(1U, videoCodec->mNackFbTypes.size());
+    ASSERT_EQ("", videoCodec->mNackFbTypes[0]);
+  }
+}
+
+// With two audio tracks and one video track, the audio payload types repeat
+// across m-sections while the video payload types appear only once; only the
+// video track's negotiated details should report unique payload types.
+TEST_F(JsepSessionTest, TestUniquePayloadTypes)
+{
+  // The audio payload types will all appear more than once, but the video
+  // payload types will be unique.
+  AddTracks(mSessionOff, "audio,audio,video");
+  AddTracks(mSessionAns, "audio,audio,video");
+
+  std::string offer = CreateOffer();
+  SetLocalOffer(offer, CHECK_SUCCESS);
+  SetRemoteOffer(offer, CHECK_SUCCESS);
+  std::string answer = CreateAnswer();
+  SetLocalAnswer(answer, CHECK_SUCCESS);
+  SetRemoteAnswer(answer, CHECK_SUCCESS);
+
+  auto offerPairs = mSessionOff.GetNegotiatedTrackPairs();
+  auto answerPairs = mSessionAns.GetNegotiatedTrackPairs();
+  ASSERT_EQ(3U, offerPairs.size());
+  ASSERT_EQ(3U, answerPairs.size());
+
+  // Pairs 0 and 1 are the (duplicated) audio tracks: no unique pts.
+  ASSERT_TRUE(offerPairs[0].mReceiving);
+  ASSERT_TRUE(offerPairs[0].mReceiving->GetNegotiatedDetails());
+  ASSERT_EQ(0U,
+      offerPairs[0].mReceiving->GetNegotiatedDetails()->
+      GetUniquePayloadTypes().size());
+
+  ASSERT_TRUE(offerPairs[1].mReceiving);
+  ASSERT_TRUE(offerPairs[1].mReceiving->GetNegotiatedDetails());
+  ASSERT_EQ(0U,
+      offerPairs[1].mReceiving->GetNegotiatedDetails()->
+      GetUniquePayloadTypes().size());
+
+  // Pair 2 is the lone video track: its pts are unique.
+  ASSERT_TRUE(offerPairs[2].mReceiving);
+  ASSERT_TRUE(offerPairs[2].mReceiving->GetNegotiatedDetails());
+  ASSERT_NE(0U,
+      offerPairs[2].mReceiving->GetNegotiatedDetails()->
+      GetUniquePayloadTypes().size());
+
+  // Same expectations on the answerer side.
+  ASSERT_TRUE(answerPairs[0].mReceiving);
+  ASSERT_TRUE(answerPairs[0].mReceiving->GetNegotiatedDetails());
+  ASSERT_EQ(0U,
+      answerPairs[0].mReceiving->GetNegotiatedDetails()->
+      GetUniquePayloadTypes().size());
+
+  ASSERT_TRUE(answerPairs[1].mReceiving);
+  ASSERT_TRUE(answerPairs[1].mReceiving->GetNegotiatedDetails());
+  ASSERT_EQ(0U,
+      answerPairs[1].mReceiving->GetNegotiatedDetails()->
+      GetUniquePayloadTypes().size());
+
+  ASSERT_TRUE(answerPairs[2].mReceiving);
+  ASSERT_TRUE(answerPairs[2].mReceiving->GetNegotiatedDetails());
+  ASSERT_NE(0U,
+      answerPairs[2].mReceiving->GetNegotiatedDetails()->
+      GetUniquePayloadTypes().size());
+}
+
+TEST_F(JsepSessionTest, UnknownFingerprintAlgorithm)
+{
+ types.push_back(SdpMediaSection::kAudio);
+ AddTracks(mSessionOff, "audio");
+ AddTracks(mSessionAns, "audio");
+
+ std::string offer(CreateOffer());
+ SetLocalOffer(offer);
+ ReplaceAll("fingerprint:sha", "fingerprint:foo", &offer);
+ nsresult rv = mSessionAns.SetRemoteDescription(kJsepSdpOffer, offer);
+ ASSERT_NE(NS_OK, rv);
+ ASSERT_NE("", mSessionAns.GetLastError());
+}
+
+TEST(H264ProfileLevelIdTest, TestLevelComparisons)
+{
+ ASSERT_LT(JsepVideoCodecDescription::GetSaneH264Level(0x421D0B), // 1b
+ JsepVideoCodecDescription::GetSaneH264Level(0x420D0B)); // 1.1
+ ASSERT_LT(JsepVideoCodecDescription::GetSaneH264Level(0x420D0A), // 1.0
+ JsepVideoCodecDescription::GetSaneH264Level(0x421D0B)); // 1b
+ ASSERT_LT(JsepVideoCodecDescription::GetSaneH264Level(0x420D0A), // 1.0
+ JsepVideoCodecDescription::GetSaneH264Level(0x420D0B)); // 1.1
+
+ ASSERT_LT(JsepVideoCodecDescription::GetSaneH264Level(0x640009), // 1b
+ JsepVideoCodecDescription::GetSaneH264Level(0x64000B)); // 1.1
+ ASSERT_LT(JsepVideoCodecDescription::GetSaneH264Level(0x64000A), // 1.0
+ JsepVideoCodecDescription::GetSaneH264Level(0x640009)); // 1b
+ ASSERT_LT(JsepVideoCodecDescription::GetSaneH264Level(0x64000A), // 1.0
+ JsepVideoCodecDescription::GetSaneH264Level(0x64000B)); // 1.1
+}
+
+TEST(H264ProfileLevelIdTest, TestLevelSetting)
+{
+ uint32_t profileLevelId = 0x420D0A;
+ JsepVideoCodecDescription::SetSaneH264Level(
+ JsepVideoCodecDescription::GetSaneH264Level(0x42100B),
+ &profileLevelId);
+ ASSERT_EQ((uint32_t)0x421D0B, profileLevelId);
+
+ JsepVideoCodecDescription::SetSaneH264Level(
+ JsepVideoCodecDescription::GetSaneH264Level(0x42000A),
+ &profileLevelId);
+ ASSERT_EQ((uint32_t)0x420D0A, profileLevelId);
+
+ profileLevelId = 0x6E100A;
+ JsepVideoCodecDescription::SetSaneH264Level(
+ JsepVideoCodecDescription::GetSaneH264Level(0x640009),
+ &profileLevelId);
+ ASSERT_EQ((uint32_t)0x6E1009, profileLevelId);
+
+ JsepVideoCodecDescription::SetSaneH264Level(
+ JsepVideoCodecDescription::GetSaneH264Level(0x64000B),
+ &profileLevelId);
+ ASSERT_EQ((uint32_t)0x6E100B, profileLevelId);
+}
+
+TEST_F(JsepSessionTest, StronglyPreferredCodec)
+{
+ for (JsepCodecDescription* codec : mSessionAns.Codecs()) {
+ if (codec->mName == "H264") {
+ codec->mStronglyPreferred = true;
+ }
+ }
+
+ types.push_back(SdpMediaSection::kVideo);
+ AddTracks(mSessionOff, "video");
+ AddTracks(mSessionAns, "video");
+
+ OfferAnswer();
+
+ const JsepCodecDescription* codec;
+ GetCodec(mSessionAns, 0, sdp::kSend, 0, 0, &codec);
+ ASSERT_TRUE(codec);
+ ASSERT_EQ("H264", codec->mName);
+ GetCodec(mSessionAns, 0, sdp::kRecv, 0, 0, &codec);
+ ASSERT_TRUE(codec);
+ ASSERT_EQ("H264", codec->mName);
+}
+
+TEST_F(JsepSessionTest, LowDynamicPayloadType)
+{
+ SetPayloadTypeNumber(mSessionOff, "opus", "12");
+ types.push_back(SdpMediaSection::kAudio);
+ AddTracks(mSessionOff, "audio");
+ AddTracks(mSessionAns, "audio");
+
+ OfferAnswer();
+ const JsepCodecDescription* codec;
+ GetCodec(mSessionAns, 0, sdp::kSend, 0, 0, &codec);
+ ASSERT_TRUE(codec);
+ ASSERT_EQ("opus", codec->mName);
+ ASSERT_EQ("12", codec->mDefaultPt);
+ GetCodec(mSessionAns, 0, sdp::kRecv, 0, 0, &codec);
+ ASSERT_TRUE(codec);
+ ASSERT_EQ("opus", codec->mName);
+ ASSERT_EQ("12", codec->mDefaultPt);
+}
+
+TEST_F(JsepSessionTest, PayloadTypeClash)
+{
+ // Disable this so mSessionOff doesn't have a duplicate
+ SetCodecEnabled(mSessionOff, "PCMU", false);
+ SetPayloadTypeNumber(mSessionOff, "opus", "0");
+ SetPayloadTypeNumber(mSessionAns, "PCMU", "0");
+ types.push_back(SdpMediaSection::kAudio);
+ AddTracks(mSessionOff, "audio");
+ AddTracks(mSessionAns, "audio");
+
+ OfferAnswer();
+ const JsepCodecDescription* codec;
+ GetCodec(mSessionAns, 0, sdp::kSend, 0, 0, &codec);
+ ASSERT_TRUE(codec);
+ ASSERT_EQ("opus", codec->mName);
+ ASSERT_EQ("0", codec->mDefaultPt);
+ GetCodec(mSessionAns, 0, sdp::kRecv, 0, 0, &codec);
+ ASSERT_TRUE(codec);
+ ASSERT_EQ("opus", codec->mName);
+ ASSERT_EQ("0", codec->mDefaultPt);
+
+ // Now, make sure that mSessionAns does not put a=rtpmap:0 PCMU in a reoffer,
+ // since pt 0 is taken for opus (the answerer still supports PCMU, and will
+ // reoffer it, but it should choose a new payload type for it)
+ JsepOfferOptions options;
+ std::string reoffer;
+ nsresult rv = mSessionAns.CreateOffer(options, &reoffer);
+ ASSERT_EQ(NS_OK, rv);
+ ASSERT_EQ(std::string::npos, reoffer.find("a=rtpmap:0 PCMU")) << reoffer;
+}
+
+TEST_P(JsepSessionTest, TestGlareRollback)
+{
+ AddTracks(mSessionOff);
+ AddTracks(mSessionAns);
+ JsepOfferOptions options;
+
+ std::string offer;
+ ASSERT_EQ(NS_OK, mSessionAns.CreateOffer(options, &offer));
+ ASSERT_EQ(NS_OK,
+ mSessionAns.SetLocalDescription(kJsepSdpOffer, offer));
+ ASSERT_EQ(kJsepStateHaveLocalOffer, mSessionAns.GetState());
+
+ ASSERT_EQ(NS_OK, mSessionOff.CreateOffer(options, &offer));
+ ASSERT_EQ(NS_OK,
+ mSessionOff.SetLocalDescription(kJsepSdpOffer, offer));
+ ASSERT_EQ(kJsepStateHaveLocalOffer, mSessionOff.GetState());
+
+ ASSERT_EQ(NS_ERROR_UNEXPECTED,
+ mSessionAns.SetRemoteDescription(kJsepSdpOffer, offer));
+ ASSERT_EQ(NS_OK,
+ mSessionAns.SetLocalDescription(kJsepSdpRollback, ""));
+ ASSERT_EQ(kJsepStateStable, mSessionAns.GetState());
+
+ SetRemoteOffer(offer);
+
+ std::string answer = CreateAnswer();
+ SetLocalAnswer(answer);
+ SetRemoteAnswer(answer);
+}
+
+TEST_P(JsepSessionTest, TestRejectOfferRollback)
+{
+ AddTracks(mSessionOff);
+ AddTracks(mSessionAns);
+
+ std::string offer = CreateOffer();
+ SetLocalOffer(offer);
+ SetRemoteOffer(offer);
+
+ ASSERT_EQ(NS_OK,
+ mSessionAns.SetRemoteDescription(kJsepSdpRollback, ""));
+ ASSERT_EQ(kJsepStateStable, mSessionAns.GetState());
+ ASSERT_EQ(types.size(), mSessionAns.GetRemoteTracksRemoved().size());
+
+ ASSERT_EQ(NS_OK,
+ mSessionOff.SetLocalDescription(kJsepSdpRollback, ""));
+ ASSERT_EQ(kJsepStateStable, mSessionOff.GetState());
+
+ OfferAnswer();
+}
+
+TEST_P(JsepSessionTest, TestInvalidRollback)
+{
+ AddTracks(mSessionOff);
+ AddTracks(mSessionAns);
+
+ ASSERT_EQ(NS_ERROR_UNEXPECTED,
+ mSessionOff.SetLocalDescription(kJsepSdpRollback, ""));
+ ASSERT_EQ(NS_ERROR_UNEXPECTED,
+ mSessionOff.SetRemoteDescription(kJsepSdpRollback, ""));
+
+ std::string offer = CreateOffer();
+ ASSERT_EQ(NS_ERROR_UNEXPECTED,
+ mSessionOff.SetLocalDescription(kJsepSdpRollback, ""));
+ ASSERT_EQ(NS_ERROR_UNEXPECTED,
+ mSessionOff.SetRemoteDescription(kJsepSdpRollback, ""));
+
+ SetLocalOffer(offer);
+ ASSERT_EQ(NS_ERROR_UNEXPECTED,
+ mSessionOff.SetRemoteDescription(kJsepSdpRollback, ""));
+
+ SetRemoteOffer(offer);
+ ASSERT_EQ(NS_ERROR_UNEXPECTED,
+ mSessionAns.SetLocalDescription(kJsepSdpRollback, ""));
+
+ std::string answer = CreateAnswer();
+ ASSERT_EQ(NS_ERROR_UNEXPECTED,
+ mSessionAns.SetLocalDescription(kJsepSdpRollback, ""));
+
+ SetLocalAnswer(answer);
+ ASSERT_EQ(NS_ERROR_UNEXPECTED,
+ mSessionAns.SetLocalDescription(kJsepSdpRollback, ""));
+ ASSERT_EQ(NS_ERROR_UNEXPECTED,
+ mSessionAns.SetRemoteDescription(kJsepSdpRollback, ""));
+
+ SetRemoteAnswer(answer);
+ ASSERT_EQ(NS_ERROR_UNEXPECTED,
+ mSessionOff.SetLocalDescription(kJsepSdpRollback, ""));
+ ASSERT_EQ(NS_ERROR_UNEXPECTED,
+ mSessionOff.SetRemoteDescription(kJsepSdpRollback, ""));
+}
+
+size_t GetActiveTransportCount(const JsepSession& session)
+{
+ auto transports = session.GetTransports();
+ size_t activeTransportCount = 0;
+ for (RefPtr<JsepTransport>& transport : transports) {
+ activeTransportCount += transport->mComponents;
+ }
+ return activeTransportCount;
+}
+
+TEST_P(JsepSessionTest, TestBalancedBundle)
+{
+ AddTracks(mSessionOff);
+ AddTracks(mSessionAns);
+
+ mSessionOff.SetBundlePolicy(kBundleBalanced);
+
+ std::string offer = CreateOffer();
+ SipccSdpParser parser;
+ UniquePtr<Sdp> parsedOffer = parser.Parse(offer);
+ ASSERT_TRUE(parsedOffer.get());
+
+ std::map<SdpMediaSection::MediaType, SdpMediaSection*> firstByType;
+
+ for (size_t i = 0; i < parsedOffer->GetMediaSectionCount(); ++i) {
+ SdpMediaSection& msection(parsedOffer->GetMediaSection(i));
+ bool firstOfType = !firstByType.count(msection.GetMediaType());
+ if (firstOfType) {
+ firstByType[msection.GetMediaType()] = &msection;
+ }
+ ASSERT_EQ(!firstOfType,
+ msection.GetAttributeList().HasAttribute(
+ SdpAttribute::kBundleOnlyAttribute));
+ }
+
+ SetLocalOffer(offer);
+ SetRemoteOffer(offer);
+ std::string answer = CreateAnswer();
+ SetLocalAnswer(answer);
+ SetRemoteAnswer(answer);
+
+ CheckPairs(mSessionOff, "Offerer pairs");
+ CheckPairs(mSessionAns, "Answerer pairs");
+ EXPECT_EQ(1U, GetActiveTransportCount(mSessionOff));
+ EXPECT_EQ(1U, GetActiveTransportCount(mSessionAns));
+}
+
+TEST_P(JsepSessionTest, TestMaxBundle)
+{
+ AddTracks(mSessionOff);
+ AddTracks(mSessionAns);
+
+ mSessionOff.SetBundlePolicy(kBundleMaxBundle);
+ OfferAnswer();
+
+ std::string offer = mSessionOff.GetLocalDescription();
+ SipccSdpParser parser;
+ UniquePtr<Sdp> parsedOffer = parser.Parse(offer);
+ ASSERT_TRUE(parsedOffer.get());
+
+ ASSERT_FALSE(
+ parsedOffer->GetMediaSection(0).GetAttributeList().HasAttribute(
+ SdpAttribute::kBundleOnlyAttribute));
+ for (size_t i = 1; i < parsedOffer->GetMediaSectionCount(); ++i) {
+ ASSERT_TRUE(
+ parsedOffer->GetMediaSection(i).GetAttributeList().HasAttribute(
+ SdpAttribute::kBundleOnlyAttribute));
+ }
+
+
+ CheckPairs(mSessionOff, "Offerer pairs");
+ CheckPairs(mSessionAns, "Answerer pairs");
+ EXPECT_EQ(1U, GetActiveTransportCount(mSessionOff));
+ EXPECT_EQ(1U, GetActiveTransportCount(mSessionAns));
+}
+
+TEST_F(JsepSessionTest, TestNonDefaultProtocol)
+{
+ AddTracks(mSessionOff, "audio,video,datachannel");
+ AddTracks(mSessionAns, "audio,video,datachannel");
+
+ std::string offer;
+ ASSERT_EQ(NS_OK, mSessionOff.CreateOffer(JsepOfferOptions(), &offer));
+ offer.replace(offer.find("UDP/TLS/RTP/SAVPF"),
+ strlen("UDP/TLS/RTP/SAVPF"),
+ "RTP/SAVPF");
+ offer.replace(offer.find("UDP/TLS/RTP/SAVPF"),
+ strlen("UDP/TLS/RTP/SAVPF"),
+ "RTP/SAVPF");
+ mSessionOff.SetLocalDescription(kJsepSdpOffer, offer);
+ mSessionAns.SetRemoteDescription(kJsepSdpOffer, offer);
+
+ std::string answer;
+ mSessionAns.CreateAnswer(JsepAnswerOptions(), &answer);
+ UniquePtr<Sdp> parsedAnswer = Parse(answer);
+ ASSERT_EQ(3U, parsedAnswer->GetMediaSectionCount());
+ ASSERT_EQ(SdpMediaSection::kRtpSavpf,
+ parsedAnswer->GetMediaSection(0).GetProtocol());
+ ASSERT_EQ(SdpMediaSection::kRtpSavpf,
+ parsedAnswer->GetMediaSection(1).GetProtocol());
+
+ mSessionAns.SetLocalDescription(kJsepSdpAnswer, answer);
+ mSessionOff.SetRemoteDescription(kJsepSdpAnswer, answer);
+
+ // Make sure reoffer uses the same protocol as before
+ mSessionOff.CreateOffer(JsepOfferOptions(), &offer);
+ UniquePtr<Sdp> parsedOffer = Parse(offer);
+ ASSERT_EQ(3U, parsedOffer->GetMediaSectionCount());
+ ASSERT_EQ(SdpMediaSection::kRtpSavpf,
+ parsedOffer->GetMediaSection(0).GetProtocol());
+ ASSERT_EQ(SdpMediaSection::kRtpSavpf,
+ parsedOffer->GetMediaSection(1).GetProtocol());
+
+ // Make sure reoffer from other side uses the same protocol as before
+ mSessionAns.CreateOffer(JsepOfferOptions(), &offer);
+ parsedOffer = Parse(offer);
+ ASSERT_EQ(3U, parsedOffer->GetMediaSectionCount());
+ ASSERT_EQ(SdpMediaSection::kRtpSavpf,
+ parsedOffer->GetMediaSection(0).GetProtocol());
+ ASSERT_EQ(SdpMediaSection::kRtpSavpf,
+ parsedOffer->GetMediaSection(1).GetProtocol());
+}
+
+} // namespace mozilla
+
+int
+main(int argc, char** argv)
+{
+ // Prevents some log spew
+ ScopedXPCOM xpcom("jsep_session_unittest");
+
+ NSS_NoDB_Init(nullptr);
+ NSS_SetDomesticPolicy();
+
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/media/webrtc/signaling/test/jsep_track_unittest.cpp b/media/webrtc/signaling/test/jsep_track_unittest.cpp
new file mode 100644
index 000000000..a09d47276
--- /dev/null
+++ b/media/webrtc/signaling/test/jsep_track_unittest.cpp
@@ -0,0 +1,1269 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#define GTEST_HAS_RTTI 0
+#include "gtest/gtest.h"
+#include "gtest_utils.h"
+
+// Magic linker includes :(
+#include "FakeMediaStreams.h"
+#include "FakeMediaStreamsImpl.h"
+#include "FakeLogging.h"
+
+#include "signaling/src/jsep/JsepTrack.h"
+#include "signaling/src/sdp/SipccSdp.h"
+#include "signaling/src/sdp/SdpHelper.h"
+
+#include "mtransport_test_utils.h"
+
+#include "FakeIPC.h"
+#include "FakeIPC.cpp"
+
+#include "TestHarness.h"
+
+namespace mozilla {
+
+class JsepTrackTest : public ::testing::Test
+{
+ public:
+ JsepTrackTest() {}
+
+ std::vector<JsepCodecDescription*>
+ MakeCodecs(bool addFecCodecs = false,
+ bool preferRed = false,
+ bool addDtmfCodec = false) const
+ {
+ std::vector<JsepCodecDescription*> results;
+ results.push_back(
+ new JsepAudioCodecDescription("1", "opus", 48000, 2, 960, 40000));
+ results.push_back(
+ new JsepAudioCodecDescription("9", "G722", 8000, 1, 320, 64000));
+ if (addDtmfCodec) {
+ results.push_back(
+ new JsepAudioCodecDescription("101", "telephone-event",
+ 8000, 1, 0, 0));
+ }
+
+ JsepVideoCodecDescription* red = nullptr;
+ if (addFecCodecs && preferRed) {
+ red = new JsepVideoCodecDescription(
+ "122",
+ "red",
+ 90000
+ );
+ results.push_back(red);
+ }
+
+ JsepVideoCodecDescription* vp8 =
+ new JsepVideoCodecDescription("120", "VP8", 90000);
+ vp8->mConstraints.maxFs = 12288;
+ vp8->mConstraints.maxFps = 60;
+ results.push_back(vp8);
+
+ JsepVideoCodecDescription* h264 =
+ new JsepVideoCodecDescription("126", "H264", 90000);
+ h264->mPacketizationMode = 1;
+ h264->mProfileLevelId = 0x42E00D;
+ results.push_back(h264);
+
+ if (addFecCodecs) {
+ if (!preferRed) {
+ red = new JsepVideoCodecDescription(
+ "122",
+ "red",
+ 90000
+ );
+ results.push_back(red);
+ }
+ JsepVideoCodecDescription* ulpfec = new JsepVideoCodecDescription(
+ "123",
+ "ulpfec",
+ 90000
+ );
+ results.push_back(ulpfec);
+ }
+
+ results.push_back(
+ new JsepApplicationCodecDescription(
+ "5000",
+ "webrtc-datachannel",
+ 16
+ ));
+
+ // if we're doing something with red, it needs
+ // to update the redundant encodings list
+ if (red) {
+ red->UpdateRedundantEncodings(results);
+ }
+
+ return results;
+ }
+
+ void Init(SdpMediaSection::MediaType type) {
+ InitCodecs();
+ InitTracks(type);
+ InitSdp(type);
+ }
+
+ void InitCodecs() {
+ mOffCodecs.values = MakeCodecs();
+ mAnsCodecs.values = MakeCodecs();
+ }
+
+ void InitTracks(SdpMediaSection::MediaType type)
+ {
+ mSendOff = new JsepTrack(type, "stream_id", "track_id", sdp::kSend);
+ mRecvOff = new JsepTrack(type, "stream_id", "track_id", sdp::kRecv);
+ mSendOff->PopulateCodecs(mOffCodecs.values);
+ mRecvOff->PopulateCodecs(mOffCodecs.values);
+
+ mSendAns = new JsepTrack(type, "stream_id", "track_id", sdp::kSend);
+ mRecvAns = new JsepTrack(type, "stream_id", "track_id", sdp::kRecv);
+ mSendAns->PopulateCodecs(mAnsCodecs.values);
+ mRecvAns->PopulateCodecs(mAnsCodecs.values);
+ }
+
+ void InitSdp(SdpMediaSection::MediaType type)
+ {
+ mOffer.reset(new SipccSdp(SdpOrigin("", 0, 0, sdp::kIPv4, "")));
+ mOffer->AddMediaSection(
+ type,
+ SdpDirectionAttribute::kInactive,
+ 0,
+ SdpHelper::GetProtocolForMediaType(type),
+ sdp::kIPv4,
+ "0.0.0.0");
+ mAnswer.reset(new SipccSdp(SdpOrigin("", 0, 0, sdp::kIPv4, "")));
+ mAnswer->AddMediaSection(
+ type,
+ SdpDirectionAttribute::kInactive,
+ 0,
+ SdpHelper::GetProtocolForMediaType(type),
+ sdp::kIPv4,
+ "0.0.0.0");
+ }
+
+ SdpMediaSection& GetOffer()
+ {
+ return mOffer->GetMediaSection(0);
+ }
+
+ SdpMediaSection& GetAnswer()
+ {
+ return mAnswer->GetMediaSection(0);
+ }
+
+ void CreateOffer()
+ {
+ if (mSendOff) {
+ mSendOff->AddToOffer(&GetOffer());
+ }
+
+ if (mRecvOff) {
+ mRecvOff->AddToOffer(&GetOffer());
+ }
+ }
+
+ void CreateAnswer()
+ {
+ if (mSendAns && GetOffer().IsReceiving()) {
+ mSendAns->AddToAnswer(GetOffer(), &GetAnswer());
+ }
+
+ if (mRecvAns && GetOffer().IsSending()) {
+ mRecvAns->AddToAnswer(GetOffer(), &GetAnswer());
+ }
+ }
+
+ void Negotiate()
+ {
+ std::cerr << "Offer SDP: " << std::endl;
+ mOffer->Serialize(std::cerr);
+
+ std::cerr << "Answer SDP: " << std::endl;
+ mAnswer->Serialize(std::cerr);
+
+ if (mSendAns && GetAnswer().IsSending()) {
+ mSendAns->Negotiate(GetAnswer(), GetOffer());
+ }
+
+ if (mRecvAns && GetAnswer().IsReceiving()) {
+ mRecvAns->Negotiate(GetAnswer(), GetOffer());
+ }
+
+ if (mSendOff && GetAnswer().IsReceiving()) {
+ mSendOff->Negotiate(GetAnswer(), GetAnswer());
+ }
+
+ if (mRecvOff && GetAnswer().IsSending()) {
+ mRecvOff->Negotiate(GetAnswer(), GetAnswer());
+ }
+ }
+
+ void OfferAnswer()
+ {
+ CreateOffer();
+ CreateAnswer();
+ Negotiate();
+ SanityCheck();
+ }
+
+ static size_t EncodingCount(const RefPtr<JsepTrack>& track)
+ {
+ return track->GetNegotiatedDetails()->GetEncodingCount();
+ }
+
+ // TODO: Look into writing a macro that wraps an ASSERT_ and returns false
+ // if it fails (probably requires writing a bool-returning function that
+ // takes a void-returning lambda with a bool outparam, which will in turn
+ // invokes the ASSERT_)
+ static void CheckEncodingCount(size_t expected,
+ const RefPtr<JsepTrack>& send,
+ const RefPtr<JsepTrack>& recv)
+ {
+ if (expected) {
+ ASSERT_TRUE(!!send);
+ ASSERT_TRUE(send->GetNegotiatedDetails());
+ ASSERT_TRUE(!!recv);
+ ASSERT_TRUE(recv->GetNegotiatedDetails());
+ }
+
+ if (send && send->GetNegotiatedDetails()) {
+ ASSERT_EQ(expected, send->GetNegotiatedDetails()->GetEncodingCount());
+ }
+
+ if (recv && recv->GetNegotiatedDetails()) {
+ ASSERT_EQ(expected, recv->GetNegotiatedDetails()->GetEncodingCount());
+ }
+ }
+
+ void CheckOffEncodingCount(size_t expected) const
+ {
+ CheckEncodingCount(expected, mSendOff, mRecvAns);
+ }
+
+ void CheckAnsEncodingCount(size_t expected) const
+ {
+ CheckEncodingCount(expected, mSendAns, mRecvOff);
+ }
+
+ const JsepCodecDescription*
+ GetCodec(const JsepTrack& track,
+ SdpMediaSection::MediaType type,
+ size_t expectedSize,
+ size_t codecIndex) const
+ {
+ if (!track.GetNegotiatedDetails() ||
+ track.GetNegotiatedDetails()->GetEncodingCount() != 1U ||
+ track.GetMediaType() != type) {
+ return nullptr;
+ }
+ const std::vector<JsepCodecDescription*>& codecs =
+ track.GetNegotiatedDetails()->GetEncoding(0).GetCodecs();
+ // it should not be possible for codecs to have a different type
+ // than the track, but we'll check the codec here just in case.
+ if (codecs.size() != expectedSize || codecIndex >= expectedSize ||
+ codecs[codecIndex]->mType != type) {
+ return nullptr;
+ }
+ return codecs[codecIndex];
+ }
+
+ const JsepVideoCodecDescription*
+ GetVideoCodec(const JsepTrack& track,
+ size_t expectedSize = 1,
+ size_t codecIndex = 0) const
+ {
+ return static_cast<const JsepVideoCodecDescription*>
+ (GetCodec(track, SdpMediaSection::kVideo, expectedSize, codecIndex));
+ }
+
+ const JsepAudioCodecDescription*
+ GetAudioCodec(const JsepTrack& track,
+ size_t expectedSize = 1,
+ size_t codecIndex = 0) const
+ {
+ return static_cast<const JsepAudioCodecDescription*>
+ (GetCodec(track, SdpMediaSection::kAudio, expectedSize, codecIndex));
+ }
+
+ void CheckOtherFbsSize(const JsepTrack& track, size_t expected) const
+ {
+ const JsepVideoCodecDescription* videoCodec = GetVideoCodec(track);
+ ASSERT_NE(videoCodec, nullptr);
+ ASSERT_EQ(videoCodec->mOtherFbTypes.size(), expected);
+ }
+
+ void CheckOtherFbExists(const JsepTrack& track,
+ SdpRtcpFbAttributeList::Type type) const
+ {
+ const JsepVideoCodecDescription* videoCodec = GetVideoCodec(track);
+ ASSERT_NE(videoCodec, nullptr);
+ for (const auto& fb : videoCodec->mOtherFbTypes) {
+ if (fb.type == type) {
+ return; // found the RtcpFb type, so stop looking
+ }
+ }
+ FAIL(); // RtcpFb type not found
+ }
+
+ void SanityCheckRtcpFbs(const JsepVideoCodecDescription& a,
+ const JsepVideoCodecDescription& b) const
+ {
+ ASSERT_EQ(a.mNackFbTypes.size(), b.mNackFbTypes.size());
+ ASSERT_EQ(a.mAckFbTypes.size(), b.mAckFbTypes.size());
+ ASSERT_EQ(a.mCcmFbTypes.size(), b.mCcmFbTypes.size());
+ ASSERT_EQ(a.mOtherFbTypes.size(), b.mOtherFbTypes.size());
+ }
+
+ void SanityCheckCodecs(const JsepCodecDescription& a,
+ const JsepCodecDescription& b) const
+ {
+ ASSERT_EQ(a.mType, b.mType);
+ ASSERT_EQ(a.mDefaultPt, b.mDefaultPt);
+ ASSERT_EQ(a.mName, b.mName);
+ ASSERT_EQ(a.mClock, b.mClock);
+ ASSERT_EQ(a.mChannels, b.mChannels);
+ ASSERT_NE(a.mDirection, b.mDirection);
+ // These constraints are for fmtp and rid, which _are_ signaled
+ ASSERT_EQ(a.mConstraints, b.mConstraints);
+
+ if (a.mType == SdpMediaSection::kVideo) {
+ SanityCheckRtcpFbs(static_cast<const JsepVideoCodecDescription&>(a),
+ static_cast<const JsepVideoCodecDescription&>(b));
+ }
+ }
+
+ void SanityCheckEncodings(const JsepTrackEncoding& a,
+ const JsepTrackEncoding& b) const
+ {
+ ASSERT_EQ(a.GetCodecs().size(), b.GetCodecs().size());
+ for (size_t i = 0; i < a.GetCodecs().size(); ++i) {
+ SanityCheckCodecs(*a.GetCodecs()[i], *b.GetCodecs()[i]);
+ }
+
+ ASSERT_EQ(a.mRid, b.mRid);
+ // mConstraints will probably differ, since they are not signaled to the
+ // other side.
+ }
+
+ void SanityCheckNegotiatedDetails(const JsepTrackNegotiatedDetails& a,
+ const JsepTrackNegotiatedDetails& b) const
+ {
+ ASSERT_EQ(a.GetEncodingCount(), b.GetEncodingCount());
+ for (size_t i = 0; i < a.GetEncodingCount(); ++i) {
+ SanityCheckEncodings(a.GetEncoding(i), b.GetEncoding(i));
+ }
+
+ ASSERT_EQ(a.GetUniquePayloadTypes().size(),
+ b.GetUniquePayloadTypes().size());
+ for (size_t i = 0; i < a.GetUniquePayloadTypes().size(); ++i) {
+ ASSERT_EQ(a.GetUniquePayloadTypes()[i], b.GetUniquePayloadTypes()[i]);
+ }
+ }
+
+ void SanityCheckTracks(const JsepTrack& a, const JsepTrack& b) const
+ {
+ if (!a.GetNegotiatedDetails()) {
+ ASSERT_FALSE(!!b.GetNegotiatedDetails());
+ return;
+ }
+
+ ASSERT_TRUE(!!a.GetNegotiatedDetails());
+ ASSERT_TRUE(!!b.GetNegotiatedDetails());
+ ASSERT_EQ(a.GetMediaType(), b.GetMediaType());
+ ASSERT_EQ(a.GetStreamId(), b.GetStreamId());
+ ASSERT_EQ(a.GetTrackId(), b.GetTrackId());
+ ASSERT_EQ(a.GetCNAME(), b.GetCNAME());
+ ASSERT_NE(a.GetDirection(), b.GetDirection());
+ ASSERT_EQ(a.GetSsrcs().size(), b.GetSsrcs().size());
+ for (size_t i = 0; i < a.GetSsrcs().size(); ++i) {
+ ASSERT_EQ(a.GetSsrcs()[i], b.GetSsrcs()[i]);
+ }
+
+ SanityCheckNegotiatedDetails(*a.GetNegotiatedDetails(),
+ *b.GetNegotiatedDetails());
+ }
+
+ void SanityCheck() const
+ {
+ if (mSendOff && mRecvAns) {
+ SanityCheckTracks(*mSendOff, *mRecvAns);
+ }
+ if (mRecvOff && mSendAns) {
+ SanityCheckTracks(*mRecvOff, *mSendAns);
+ }
+ }
+
+ protected:
+ RefPtr<JsepTrack> mSendOff;
+ RefPtr<JsepTrack> mRecvOff;
+ RefPtr<JsepTrack> mSendAns;
+ RefPtr<JsepTrack> mRecvAns;
+ PtrVector<JsepCodecDescription> mOffCodecs;
+ PtrVector<JsepCodecDescription> mAnsCodecs;
+ UniquePtr<Sdp> mOffer;
+ UniquePtr<Sdp> mAnswer;
+};
+
+TEST_F(JsepTrackTest, CreateDestroy)
+{
+ Init(SdpMediaSection::kAudio);
+}
+
+TEST_F(JsepTrackTest, AudioNegotiation)
+{
+ Init(SdpMediaSection::kAudio);
+ OfferAnswer();
+ CheckOffEncodingCount(1);
+ CheckAnsEncodingCount(1);
+}
+
+TEST_F(JsepTrackTest, VideoNegotiation)
+{
+ Init(SdpMediaSection::kVideo);
+ OfferAnswer();
+ CheckOffEncodingCount(1);
+ CheckAnsEncodingCount(1);
+}
+
+class CheckForCodecType
+{
+public:
+ explicit CheckForCodecType(SdpMediaSection::MediaType type,
+ bool *result) :
+ mResult(result),
+ mType(type) {}
+
+ void operator()(JsepCodecDescription* codec) {
+ if (codec->mType == mType) {
+ *mResult = true;
+ }
+ }
+
+private:
+ bool *mResult;
+ SdpMediaSection::MediaType mType;
+};
+
+TEST_F(JsepTrackTest, CheckForMismatchedAudioCodecAndVideoTrack)
+{
+ PtrVector<JsepCodecDescription> offerCodecs;
+
+ // make codecs including telephone-event (an audio codec)
+ offerCodecs.values = MakeCodecs(false, false, true);
+ RefPtr<JsepTrack> videoTrack = new JsepTrack(SdpMediaSection::kVideo,
+ "stream_id",
+ "track_id",
+ sdp::kSend);
+ // populate codecs and then make sure we don't have any audio codecs
+ // in the video track
+ videoTrack->PopulateCodecs(offerCodecs.values);
+
+ bool found = false;
+ videoTrack->ForEachCodec(CheckForCodecType(SdpMediaSection::kAudio, &found));
+ ASSERT_FALSE(found);
+
+ found = false;
+ videoTrack->ForEachCodec(CheckForCodecType(SdpMediaSection::kVideo, &found));
+ ASSERT_TRUE(found); // for sanity, make sure we did find video codecs
+}
+
+TEST_F(JsepTrackTest, CheckVideoTrackWithHackedDtmfSdp)
+{
+ Init(SdpMediaSection::kVideo);
+ CreateOffer();
+ // make sure we don't find sdp containing telephone-event in video track
+ ASSERT_EQ(mOffer->ToString().find("a=rtpmap:101 telephone-event"),
+ std::string::npos);
+ // force audio codec telephone-event into video m= section of offer
+ GetOffer().AddCodec("101", "telephone-event", 8000, 1);
+ // make sure we _do_ find sdp containing telephone-event in video track
+ ASSERT_NE(mOffer->ToString().find("a=rtpmap:101 telephone-event"),
+ std::string::npos);
+
+ CreateAnswer();
+ // make sure we don't find sdp containing telephone-event in video track
+ ASSERT_EQ(mAnswer->ToString().find("a=rtpmap:101 telephone-event"),
+ std::string::npos);
+ // force audio codec telephone-event into video m= section of answer
+ GetAnswer().AddCodec("101", "telephone-event", 8000, 1);
+ // make sure we _do_ find sdp containing telephone-event in video track
+ ASSERT_NE(mAnswer->ToString().find("a=rtpmap:101 telephone-event"),
+ std::string::npos);
+
+ Negotiate();
+ SanityCheck();
+
+ CheckOffEncodingCount(1);
+ CheckAnsEncodingCount(1);
+
+ ASSERT_TRUE(mSendOff.get());
+ ASSERT_TRUE(mRecvOff.get());
+ ASSERT_TRUE(mSendAns.get());
+ ASSERT_TRUE(mRecvAns.get());
+
+ // make sure we still don't find any audio codecs in the video track after
+ // hacking the sdp
+ bool found = false;
+ mSendOff->ForEachCodec(CheckForCodecType(SdpMediaSection::kAudio, &found));
+ ASSERT_FALSE(found);
+ mRecvOff->ForEachCodec(CheckForCodecType(SdpMediaSection::kAudio, &found));
+ ASSERT_FALSE(found);
+ mSendAns->ForEachCodec(CheckForCodecType(SdpMediaSection::kAudio, &found));
+ ASSERT_FALSE(found);
+ mRecvAns->ForEachCodec(CheckForCodecType(SdpMediaSection::kAudio, &found));
+ ASSERT_FALSE(found);
+}
+
+TEST_F(JsepTrackTest, AudioNegotiationOffererDtmf)
+{
+ mOffCodecs.values = MakeCodecs(false, false, true);
+ mAnsCodecs.values = MakeCodecs(false, false, false);
+
+ InitTracks(SdpMediaSection::kAudio);
+ InitSdp(SdpMediaSection::kAudio);
+ OfferAnswer();
+
+ CheckOffEncodingCount(1);
+ CheckAnsEncodingCount(1);
+
+ ASSERT_NE(mOffer->ToString().find("a=rtpmap:101 telephone-event"),
+ std::string::npos);
+ ASSERT_EQ(mAnswer->ToString().find("a=rtpmap:101 telephone-event"),
+ std::string::npos);
+
+ ASSERT_NE(mOffer->ToString().find("a=fmtp:101 0-15"), std::string::npos);
+ ASSERT_EQ(mAnswer->ToString().find("a=fmtp:101"), std::string::npos);
+
+ const JsepAudioCodecDescription* track = nullptr;
+ ASSERT_TRUE((track = GetAudioCodec(*mSendOff)));
+ ASSERT_EQ("1", track->mDefaultPt);
+ ASSERT_TRUE((track = GetAudioCodec(*mRecvOff)));
+ ASSERT_EQ("1", track->mDefaultPt);
+ ASSERT_TRUE((track = GetAudioCodec(*mSendAns)));
+ ASSERT_EQ("1", track->mDefaultPt);
+ ASSERT_TRUE((track = GetAudioCodec(*mRecvAns)));
+ ASSERT_EQ("1", track->mDefaultPt);
+}
+
+TEST_F(JsepTrackTest, AudioNegotiationAnswererDtmf)
+{
+ mOffCodecs.values = MakeCodecs(false, false, false);
+ mAnsCodecs.values = MakeCodecs(false, false, true);
+
+ InitTracks(SdpMediaSection::kAudio);
+ InitSdp(SdpMediaSection::kAudio);
+ OfferAnswer();
+
+ CheckOffEncodingCount(1);
+ CheckAnsEncodingCount(1);
+
+ ASSERT_EQ(mOffer->ToString().find("a=rtpmap:101 telephone-event"),
+ std::string::npos);
+ ASSERT_EQ(mAnswer->ToString().find("a=rtpmap:101 telephone-event"),
+ std::string::npos);
+
+ ASSERT_EQ(mOffer->ToString().find("a=fmtp:101 0-15"), std::string::npos);
+ ASSERT_EQ(mAnswer->ToString().find("a=fmtp:101"), std::string::npos);
+
+ const JsepAudioCodecDescription* track = nullptr;
+ ASSERT_TRUE((track = GetAudioCodec(*mSendOff)));
+ ASSERT_EQ("1", track->mDefaultPt);
+ ASSERT_TRUE((track = GetAudioCodec(*mRecvOff)));
+ ASSERT_EQ("1", track->mDefaultPt);
+ ASSERT_TRUE((track = GetAudioCodec(*mSendAns)));
+ ASSERT_EQ("1", track->mDefaultPt);
+ ASSERT_TRUE((track = GetAudioCodec(*mRecvAns)));
+ ASSERT_EQ("1", track->mDefaultPt);
+}
+
+TEST_F(JsepTrackTest, AudioNegotiationOffererAnswererDtmf)
+{
+ mOffCodecs.values = MakeCodecs(false, false, true);
+ mAnsCodecs.values = MakeCodecs(false, false, true);
+
+ InitTracks(SdpMediaSection::kAudio);
+ InitSdp(SdpMediaSection::kAudio);
+ OfferAnswer();
+
+ CheckOffEncodingCount(1);
+ CheckAnsEncodingCount(1);
+
+ ASSERT_NE(mOffer->ToString().find("a=rtpmap:101 telephone-event"),
+ std::string::npos);
+ ASSERT_NE(mAnswer->ToString().find("a=rtpmap:101 telephone-event"),
+ std::string::npos);
+
+ ASSERT_NE(mOffer->ToString().find("a=fmtp:101 0-15"), std::string::npos);
+ ASSERT_NE(mAnswer->ToString().find("a=fmtp:101 0-15"), std::string::npos);
+
+ const JsepAudioCodecDescription* track = nullptr;
+ ASSERT_TRUE((track = GetAudioCodec(*mSendOff, 2)));
+ ASSERT_EQ("1", track->mDefaultPt);
+ ASSERT_TRUE((track = GetAudioCodec(*mRecvOff, 2)));
+ ASSERT_EQ("1", track->mDefaultPt);
+ ASSERT_TRUE((track = GetAudioCodec(*mSendAns, 2)));
+ ASSERT_EQ("1", track->mDefaultPt);
+ ASSERT_TRUE((track = GetAudioCodec(*mRecvAns, 2)));
+ ASSERT_EQ("1", track->mDefaultPt);
+
+ ASSERT_TRUE((track = GetAudioCodec(*mSendOff, 2, 1)));
+ ASSERT_EQ("101", track->mDefaultPt);
+ ASSERT_TRUE((track = GetAudioCodec(*mRecvOff, 2, 1)));
+ ASSERT_EQ("101", track->mDefaultPt);
+ ASSERT_TRUE((track = GetAudioCodec(*mSendAns, 2, 1)));
+ ASSERT_EQ("101", track->mDefaultPt);
+ ASSERT_TRUE((track = GetAudioCodec(*mRecvAns, 2, 1)));
+ ASSERT_EQ("101", track->mDefaultPt);
+}
+
// DTMF negotiation where the offerer strips its fmtp for telephone-event
// (PT 101) but the answerer keeps the default. Expectation: rtpmap:101 is in
// both SDPs, the "0-15" fmtp appears only in the answer, and the DTMF codec
// is still negotiated on all four track views.
TEST_F(JsepTrackTest, AudioNegotiationDtmfOffererNoFmtpAnswererFmtp)
{
  // MakeCodecs(..., true): third flag presumably adds telephone-event —
  // inferred from the assertions below; confirm against MakeCodecs().
  mOffCodecs.values = MakeCodecs(false, false, true);
  mAnsCodecs.values = MakeCodecs(false, false, true);

  InitTracks(SdpMediaSection::kAudio);
  InitSdp(SdpMediaSection::kAudio);

  CreateOffer();
  // Drop the offer's fmtp line for PT 101 before the answer is generated.
  GetOffer().RemoveFmtp("101");

  CreateAnswer();

  Negotiate();
  SanityCheck();

  CheckOffEncodingCount(1);
  CheckAnsEncodingCount(1);

  ASSERT_NE(mOffer->ToString().find("a=rtpmap:101 telephone-event"),
            std::string::npos);
  ASSERT_NE(mAnswer->ToString().find("a=rtpmap:101 telephone-event"),
            std::string::npos);

  ASSERT_EQ(mOffer->ToString().find("a=fmtp:101"), std::string::npos);
  ASSERT_NE(mAnswer->ToString().find("a=fmtp:101 0-15"), std::string::npos);

  const JsepAudioCodecDescription* track = nullptr;
  // First negotiated audio codec (index 0 of 2) keeps default PT "1" on all
  // four track views (offerer/answerer x send/recv).
  ASSERT_TRUE((track = GetAudioCodec(*mSendOff, 2)));
  ASSERT_EQ("1", track->mDefaultPt);
  ASSERT_TRUE((track = GetAudioCodec(*mRecvOff, 2)));
  ASSERT_EQ("1", track->mDefaultPt);
  ASSERT_TRUE((track = GetAudioCodec(*mSendAns, 2)));
  ASSERT_EQ("1", track->mDefaultPt);
  ASSERT_TRUE((track = GetAudioCodec(*mRecvAns, 2)));
  ASSERT_EQ("1", track->mDefaultPt);

  // telephone-event survives as codec index 1 with PT "101" everywhere.
  ASSERT_TRUE((track = GetAudioCodec(*mSendOff, 2, 1)));
  ASSERT_EQ("101", track->mDefaultPt);
  ASSERT_TRUE((track = GetAudioCodec(*mRecvOff, 2, 1)));
  ASSERT_EQ("101", track->mDefaultPt);
  ASSERT_TRUE((track = GetAudioCodec(*mSendAns, 2, 1)));
  ASSERT_EQ("101", track->mDefaultPt);
  ASSERT_TRUE((track = GetAudioCodec(*mRecvAns, 2, 1)));
  ASSERT_EQ("101", track->mDefaultPt);
}
+
// Mirror of the previous test: the answerer strips its fmtp for
// telephone-event (PT 101). The "0-15" fmtp appears only in the offer, and
// DTMF is still negotiated on all four track views.
TEST_F(JsepTrackTest, AudioNegotiationDtmfOffererFmtpAnswererNoFmtp)
{
  mOffCodecs.values = MakeCodecs(false, false, true);
  mAnsCodecs.values = MakeCodecs(false, false, true);

  InitTracks(SdpMediaSection::kAudio);
  InitSdp(SdpMediaSection::kAudio);

  CreateOffer();

  CreateAnswer();
  // Drop the answer's fmtp line for PT 101 before negotiation.
  GetAnswer().RemoveFmtp("101");

  Negotiate();
  SanityCheck();

  CheckOffEncodingCount(1);
  CheckAnsEncodingCount(1);

  ASSERT_NE(mOffer->ToString().find("a=rtpmap:101 telephone-event"),
            std::string::npos);
  ASSERT_NE(mAnswer->ToString().find("a=rtpmap:101 telephone-event"),
            std::string::npos);

  ASSERT_NE(mOffer->ToString().find("a=fmtp:101 0-15"), std::string::npos);
  ASSERT_EQ(mAnswer->ToString().find("a=fmtp:101"), std::string::npos);

  const JsepAudioCodecDescription* track = nullptr;
  // Primary audio codec keeps default PT "1" on all four track views.
  ASSERT_TRUE((track = GetAudioCodec(*mSendOff, 2)));
  ASSERT_EQ("1", track->mDefaultPt);
  ASSERT_TRUE((track = GetAudioCodec(*mRecvOff, 2)));
  ASSERT_EQ("1", track->mDefaultPt);
  ASSERT_TRUE((track = GetAudioCodec(*mSendAns, 2)));
  ASSERT_EQ("1", track->mDefaultPt);
  ASSERT_TRUE((track = GetAudioCodec(*mRecvAns, 2)));
  ASSERT_EQ("1", track->mDefaultPt);

  // telephone-event survives as codec index 1 with PT "101" everywhere.
  ASSERT_TRUE((track = GetAudioCodec(*mSendOff, 2, 1)));
  ASSERT_EQ("101", track->mDefaultPt);
  ASSERT_TRUE((track = GetAudioCodec(*mRecvOff, 2, 1)));
  ASSERT_EQ("101", track->mDefaultPt);
  ASSERT_TRUE((track = GetAudioCodec(*mSendAns, 2, 1)));
  ASSERT_EQ("101", track->mDefaultPt);
  ASSERT_TRUE((track = GetAudioCodec(*mRecvAns, 2, 1)));
  ASSERT_EQ("101", track->mDefaultPt);
}
+
// Both sides strip the telephone-event fmtp. Neither SDP carries an
// fmtp:101 line, yet DTMF is still negotiated (rtpmap alone suffices).
TEST_F(JsepTrackTest, AudioNegotiationDtmfOffererNoFmtpAnswererNoFmtp)
{
  mOffCodecs.values = MakeCodecs(false, false, true);
  mAnsCodecs.values = MakeCodecs(false, false, true);

  InitTracks(SdpMediaSection::kAudio);
  InitSdp(SdpMediaSection::kAudio);

  CreateOffer();
  GetOffer().RemoveFmtp("101");

  CreateAnswer();
  GetAnswer().RemoveFmtp("101");

  Negotiate();
  SanityCheck();

  CheckOffEncodingCount(1);
  CheckAnsEncodingCount(1);

  ASSERT_NE(mOffer->ToString().find("a=rtpmap:101 telephone-event"),
            std::string::npos);
  ASSERT_NE(mAnswer->ToString().find("a=rtpmap:101 telephone-event"),
            std::string::npos);

  // No fmtp:101 anywhere.
  ASSERT_EQ(mOffer->ToString().find("a=fmtp:101"), std::string::npos);
  ASSERT_EQ(mAnswer->ToString().find("a=fmtp:101"), std::string::npos);

  const JsepAudioCodecDescription* track = nullptr;
  // Primary audio codec keeps default PT "1" on all four track views.
  ASSERT_TRUE((track = GetAudioCodec(*mSendOff, 2)));
  ASSERT_EQ("1", track->mDefaultPt);
  ASSERT_TRUE((track = GetAudioCodec(*mRecvOff, 2)));
  ASSERT_EQ("1", track->mDefaultPt);
  ASSERT_TRUE((track = GetAudioCodec(*mSendAns, 2)));
  ASSERT_EQ("1", track->mDefaultPt);
  ASSERT_TRUE((track = GetAudioCodec(*mRecvAns, 2)));
  ASSERT_EQ("1", track->mDefaultPt);

  // telephone-event survives as codec index 1 with PT "101" everywhere.
  ASSERT_TRUE((track = GetAudioCodec(*mSendOff, 2, 1)));
  ASSERT_EQ("101", track->mDefaultPt);
  ASSERT_TRUE((track = GetAudioCodec(*mRecvOff, 2, 1)));
  ASSERT_EQ("101", track->mDefaultPt);
  ASSERT_TRUE((track = GetAudioCodec(*mSendAns, 2, 1)));
  ASSERT_EQ("101", track->mDefaultPt);
  ASSERT_TRUE((track = GetAudioCodec(*mRecvAns, 2, 1)));
  ASSERT_EQ("101", track->mDefaultPt);
}
+
// Only the offerer supports FEC (red PT 122 / ulpfec PT 123): FEC shows up
// in the offer but not the answer, and the negotiated default codec is
// PT 120 everywhere. ("Negotation" typo is baked into the test name.)
TEST_F(JsepTrackTest, VideoNegotationOffererFEC)
{
  mOffCodecs.values = MakeCodecs(true);
  mAnsCodecs.values = MakeCodecs(false);

  InitTracks(SdpMediaSection::kVideo);
  InitSdp(SdpMediaSection::kVideo);
  OfferAnswer();

  CheckOffEncodingCount(1);
  CheckAnsEncodingCount(1);

  ASSERT_NE(mOffer->ToString().find("a=rtpmap:122 red"), std::string::npos);
  ASSERT_NE(mOffer->ToString().find("a=rtpmap:123 ulpfec"), std::string::npos);
  ASSERT_EQ(mAnswer->ToString().find("a=rtpmap:122 red"), std::string::npos);
  ASSERT_EQ(mAnswer->ToString().find("a=rtpmap:123 ulpfec"), std::string::npos);

  // red's redundancy list covers the offered video PTs; absent from answer.
  ASSERT_NE(mOffer->ToString().find("a=fmtp:122 120/126/123"), std::string::npos);
  ASSERT_EQ(mAnswer->ToString().find("a=fmtp:122"), std::string::npos);

  const JsepVideoCodecDescription* track = nullptr;
  ASSERT_TRUE((track = GetVideoCodec(*mSendOff)));
  ASSERT_EQ("120", track->mDefaultPt);
  ASSERT_TRUE((track = GetVideoCodec(*mRecvOff)));
  ASSERT_EQ("120", track->mDefaultPt);
  ASSERT_TRUE((track = GetVideoCodec(*mSendAns)));
  ASSERT_EQ("120", track->mDefaultPt);
  ASSERT_TRUE((track = GetVideoCodec(*mRecvAns)));
  ASSERT_EQ("120", track->mDefaultPt);
}
+
// Only the answerer supports FEC. Since the offer never mentions red/ulpfec,
// they cannot appear in the answer either; default codec stays PT 120.
TEST_F(JsepTrackTest, VideoNegotationAnswererFEC)
{
  mOffCodecs.values = MakeCodecs(false);
  mAnsCodecs.values = MakeCodecs(true);

  InitTracks(SdpMediaSection::kVideo);
  InitSdp(SdpMediaSection::kVideo);
  OfferAnswer();

  CheckOffEncodingCount(1);
  CheckAnsEncodingCount(1);

  // FEC absent from both sides.
  ASSERT_EQ(mOffer->ToString().find("a=rtpmap:122 red"), std::string::npos);
  ASSERT_EQ(mOffer->ToString().find("a=rtpmap:123 ulpfec"), std::string::npos);
  ASSERT_EQ(mAnswer->ToString().find("a=rtpmap:122 red"), std::string::npos);
  ASSERT_EQ(mAnswer->ToString().find("a=rtpmap:123 ulpfec"), std::string::npos);

  ASSERT_EQ(mOffer->ToString().find("a=fmtp:122"), std::string::npos);
  ASSERT_EQ(mAnswer->ToString().find("a=fmtp:122"), std::string::npos);

  const JsepVideoCodecDescription* track = nullptr;
  ASSERT_TRUE((track = GetVideoCodec(*mSendOff)));
  ASSERT_EQ("120", track->mDefaultPt);
  ASSERT_TRUE((track = GetVideoCodec(*mRecvOff)));
  ASSERT_EQ("120", track->mDefaultPt);
  ASSERT_TRUE((track = GetVideoCodec(*mSendAns)));
  ASSERT_EQ("120", track->mDefaultPt);
  ASSERT_TRUE((track = GetVideoCodec(*mRecvAns)));
  ASSERT_EQ("120", track->mDefaultPt);
}
+
// Both sides support FEC: red/ulpfec are negotiated in both directions
// (4 codecs total), but the default codec remains PT 120.
TEST_F(JsepTrackTest, VideoNegotationOffererAnswererFEC)
{
  mOffCodecs.values = MakeCodecs(true);
  mAnsCodecs.values = MakeCodecs(true);

  InitTracks(SdpMediaSection::kVideo);
  InitSdp(SdpMediaSection::kVideo);
  OfferAnswer();

  CheckOffEncodingCount(1);
  CheckAnsEncodingCount(1);

  ASSERT_NE(mOffer->ToString().find("a=rtpmap:122 red"), std::string::npos);
  ASSERT_NE(mOffer->ToString().find("a=rtpmap:123 ulpfec"), std::string::npos);
  ASSERT_NE(mAnswer->ToString().find("a=rtpmap:122 red"), std::string::npos);
  ASSERT_NE(mAnswer->ToString().find("a=rtpmap:123 ulpfec"), std::string::npos);

  ASSERT_NE(mOffer->ToString().find("a=fmtp:122 120/126/123"), std::string::npos);
  ASSERT_NE(mAnswer->ToString().find("a=fmtp:122 120/126/123"), std::string::npos);

  const JsepVideoCodecDescription* track = nullptr;
  // 4 negotiated codecs; the first (default) is still PT 120 everywhere.
  ASSERT_TRUE((track = GetVideoCodec(*mSendOff, 4)));
  ASSERT_EQ("120", track->mDefaultPt);
  ASSERT_TRUE((track = GetVideoCodec(*mRecvOff, 4)));
  ASSERT_EQ("120", track->mDefaultPt);
  ASSERT_TRUE((track = GetVideoCodec(*mSendAns, 4)));
  ASSERT_EQ("120", track->mDefaultPt);
  ASSERT_TRUE((track = GetVideoCodec(*mRecvAns, 4)));
  ASSERT_EQ("120", track->mDefaultPt);
}
+
// Same as above, but the offerer prefers FEC (second MakeCodecs flag):
// the negotiated default codec becomes red (PT 122) on all track views.
TEST_F(JsepTrackTest, VideoNegotationOffererAnswererFECPreferred)
{
  mOffCodecs.values = MakeCodecs(true, true);
  mAnsCodecs.values = MakeCodecs(true);

  InitTracks(SdpMediaSection::kVideo);
  InitSdp(SdpMediaSection::kVideo);
  OfferAnswer();

  CheckOffEncodingCount(1);
  CheckAnsEncodingCount(1);

  ASSERT_NE(mOffer->ToString().find("a=rtpmap:122 red"), std::string::npos);
  ASSERT_NE(mOffer->ToString().find("a=rtpmap:123 ulpfec"), std::string::npos);
  ASSERT_NE(mAnswer->ToString().find("a=rtpmap:122 red"), std::string::npos);
  ASSERT_NE(mAnswer->ToString().find("a=rtpmap:123 ulpfec"), std::string::npos);

  ASSERT_NE(mOffer->ToString().find("a=fmtp:122 120/126/123"), std::string::npos);
  ASSERT_NE(mAnswer->ToString().find("a=fmtp:122 120/126/123"), std::string::npos);

  const JsepVideoCodecDescription* track = nullptr;
  // Default PT is now 122 (red) because the offerer listed it first.
  ASSERT_TRUE((track = GetVideoCodec(*mSendOff, 4)));
  ASSERT_EQ("122", track->mDefaultPt);
  ASSERT_TRUE((track = GetVideoCodec(*mRecvOff, 4)));
  ASSERT_EQ("122", track->mDefaultPt);
  ASSERT_TRUE((track = GetVideoCodec(*mSendAns, 4)));
  ASSERT_EQ("122", track->mDefaultPt);
  ASSERT_TRUE((track = GetVideoCodec(*mRecvAns, 4)));
  ASSERT_EQ("122", track->mDefaultPt);
}
+
// Make sure we only put the right things in the fmtp:122 120/.... line:
// with H264 (PT 126) removed from the answerer's codec list, the answer's
// red redundancy list must omit 126 while the offer still includes it.
TEST_F(JsepTrackTest, VideoNegotationOffererAnswererFECMismatch)
{
  mOffCodecs.values = MakeCodecs(true, true);
  mAnsCodecs.values = MakeCodecs(true);
  // remove h264 from answer codecs (guard the index assumption first)
  ASSERT_EQ("H264", mAnsCodecs.values[3]->mName);
  mAnsCodecs.values.erase(mAnsCodecs.values.begin()+3);

  InitTracks(SdpMediaSection::kVideo);
  InitSdp(SdpMediaSection::kVideo);
  OfferAnswer();

  CheckOffEncodingCount(1);
  CheckAnsEncodingCount(1);

  ASSERT_NE(mOffer->ToString().find("a=rtpmap:122 red"), std::string::npos);
  ASSERT_NE(mOffer->ToString().find("a=rtpmap:123 ulpfec"), std::string::npos);
  ASSERT_NE(mAnswer->ToString().find("a=rtpmap:122 red"), std::string::npos);
  ASSERT_NE(mAnswer->ToString().find("a=rtpmap:123 ulpfec"), std::string::npos);

  // Offer lists 126; answer's redundancy list must not.
  ASSERT_NE(mOffer->ToString().find("a=fmtp:122 120/126/123"), std::string::npos);
  ASSERT_NE(mAnswer->ToString().find("a=fmtp:122 120/123"), std::string::npos);

  const JsepVideoCodecDescription* track = nullptr;
  // Only 3 codecs negotiated (H264 dropped); default is red (PT 122).
  ASSERT_TRUE((track = GetVideoCodec(*mSendOff, 3)));
  ASSERT_EQ("122", track->mDefaultPt);
  ASSERT_TRUE((track = GetVideoCodec(*mRecvOff, 3)));
  ASSERT_EQ("122", track->mDefaultPt);
  ASSERT_TRUE((track = GetVideoCodec(*mSendAns, 3)));
  ASSERT_EQ("122", track->mDefaultPt);
  ASSERT_TRUE((track = GetVideoCodec(*mRecvAns, 3)));
  ASSERT_EQ("122", track->mDefaultPt);
}
+
// Edge case: the offerer adds a VP9 codec with payload type "0". The offer's
// red redundancy list must include the /0 entry; the answer (which lacks
// that codec) must end exactly after /123.
TEST_F(JsepTrackTest, VideoNegotationOffererAnswererFECZeroVP9Codec)
{
  mOffCodecs.values = MakeCodecs(true);
  // Raw new: ownership presumably passes to mOffCodecs.values (a pointer-
  // owning container) — TODO confirm against its declaration.
  JsepVideoCodecDescription* vp9 =
    new JsepVideoCodecDescription("0", "VP9", 90000);
  vp9->mConstraints.maxFs = 12288;
  vp9->mConstraints.maxFps = 60;
  mOffCodecs.values.push_back(vp9);

  // Guard the index assumption before casting.
  ASSERT_EQ(8U, mOffCodecs.values.size());
  JsepVideoCodecDescription* red =
    static_cast<JsepVideoCodecDescription*>(mOffCodecs.values[4]);
  ASSERT_EQ("red", red->mName);
  // rebuild the redundant encodings with our newly added "wacky" VP9
  red->mRedundantEncodings.clear();
  red->UpdateRedundantEncodings(mOffCodecs.values);

  mAnsCodecs.values = MakeCodecs(true);

  InitTracks(SdpMediaSection::kVideo);
  InitSdp(SdpMediaSection::kVideo);
  OfferAnswer();

  CheckOffEncodingCount(1);
  CheckAnsEncodingCount(1);

  ASSERT_NE(mOffer->ToString().find("a=rtpmap:122 red"), std::string::npos);
  ASSERT_NE(mOffer->ToString().find("a=rtpmap:123 ulpfec"), std::string::npos);
  ASSERT_NE(mAnswer->ToString().find("a=rtpmap:122 red"), std::string::npos);
  ASSERT_NE(mAnswer->ToString().find("a=rtpmap:123 ulpfec"), std::string::npos);

  ASSERT_NE(mOffer->ToString().find("a=fmtp:122 120/126/123/0"), std::string::npos);
  // Trailing \r\n pins the answer's list to exactly 120/126/123.
  ASSERT_NE(mAnswer->ToString().find("a=fmtp:122 120/126/123\r\n"), std::string::npos);
}
+
+TEST_F(JsepTrackTest, VideoNegotiationOfferRemb)
+{
+ InitCodecs();
+ // enable remb on the offer codecs
+ ((JsepVideoCodecDescription*)mOffCodecs.values[2])->EnableRemb();
+ InitTracks(SdpMediaSection::kVideo);
+ InitSdp(SdpMediaSection::kVideo);
+ OfferAnswer();
+
+ // make sure REMB is on offer and not on answer
+ ASSERT_NE(mOffer->ToString().find("a=rtcp-fb:120 goog-remb"),
+ std::string::npos);
+ ASSERT_EQ(mAnswer->ToString().find("a=rtcp-fb:120 goog-remb"),
+ std::string::npos);
+ CheckOffEncodingCount(1);
+ CheckAnsEncodingCount(1);
+
+ CheckOtherFbsSize(*mSendOff, 0);
+ CheckOtherFbsSize(*mRecvAns, 0);
+
+ CheckOtherFbsSize(*mSendAns, 0);
+ CheckOtherFbsSize(*mRecvOff, 0);
+}
+
+TEST_F(JsepTrackTest, VideoNegotiationAnswerRemb)
+{
+ InitCodecs();
+ // enable remb on the answer codecs
+ ((JsepVideoCodecDescription*)mAnsCodecs.values[2])->EnableRemb();
+ InitTracks(SdpMediaSection::kVideo);
+ InitSdp(SdpMediaSection::kVideo);
+ OfferAnswer();
+
+ // make sure REMB is not on offer and not on answer
+ ASSERT_EQ(mOffer->ToString().find("a=rtcp-fb:120 goog-remb"),
+ std::string::npos);
+ ASSERT_EQ(mAnswer->ToString().find("a=rtcp-fb:120 goog-remb"),
+ std::string::npos);
+ CheckOffEncodingCount(1);
+ CheckAnsEncodingCount(1);
+
+ CheckOtherFbsSize(*mSendOff, 0);
+ CheckOtherFbsSize(*mRecvAns, 0);
+
+ CheckOtherFbsSize(*mSendAns, 0);
+ CheckOtherFbsSize(*mRecvOff, 0);
+}
+
+TEST_F(JsepTrackTest, VideoNegotiationOfferAnswerRemb)
+{
+ InitCodecs();
+ // enable remb on the offer and answer codecs
+ ((JsepVideoCodecDescription*)mOffCodecs.values[2])->EnableRemb();
+ ((JsepVideoCodecDescription*)mAnsCodecs.values[2])->EnableRemb();
+ InitTracks(SdpMediaSection::kVideo);
+ InitSdp(SdpMediaSection::kVideo);
+ OfferAnswer();
+
+ // make sure REMB is on offer and on answer
+ ASSERT_NE(mOffer->ToString().find("a=rtcp-fb:120 goog-remb"),
+ std::string::npos);
+ ASSERT_NE(mAnswer->ToString().find("a=rtcp-fb:120 goog-remb"),
+ std::string::npos);
+ CheckOffEncodingCount(1);
+ CheckAnsEncodingCount(1);
+
+ CheckOtherFbsSize(*mSendOff, 1);
+ CheckOtherFbsSize(*mRecvAns, 1);
+ CheckOtherFbExists(*mSendOff, SdpRtcpFbAttributeList::kRemb);
+ CheckOtherFbExists(*mRecvAns, SdpRtcpFbAttributeList::kRemb);
+
+ CheckOtherFbsSize(*mSendAns, 1);
+ CheckOtherFbsSize(*mRecvOff, 1);
+ CheckOtherFbExists(*mSendAns, SdpRtcpFbAttributeList::kRemb);
+ CheckOtherFbExists(*mRecvOff, SdpRtcpFbAttributeList::kRemb);
+}
+
// Offerer sendonly / answerer recvonly: only the offerer ends up with a
// negotiated send encoding.
TEST_F(JsepTrackTest, AudioOffSendonlyAnsRecvonly)
{
  Init(SdpMediaSection::kAudio);
  mRecvOff = nullptr;   // offerer does not receive
  mSendAns = nullptr;   // answerer does not send
  OfferAnswer();
  CheckOffEncodingCount(1);
  CheckAnsEncodingCount(0);
}
+
// Video variant: offerer sendonly / answerer recvonly.
TEST_F(JsepTrackTest, VideoOffSendonlyAnsRecvonly)
{
  Init(SdpMediaSection::kVideo);
  mRecvOff = nullptr;
  mSendAns = nullptr;
  OfferAnswer();
  CheckOffEncodingCount(1);
  CheckAnsEncodingCount(0);
}
+
// Offerer sendrecv / answerer recvonly: answerer has no send encoding.
TEST_F(JsepTrackTest, AudioOffSendrecvAnsRecvonly)
{
  Init(SdpMediaSection::kAudio);
  mSendAns = nullptr;
  OfferAnswer();
  CheckOffEncodingCount(1);
  CheckAnsEncodingCount(0);
}
+
// Video variant: offerer sendrecv / answerer recvonly.
TEST_F(JsepTrackTest, VideoOffSendrecvAnsRecvonly)
{
  Init(SdpMediaSection::kVideo);
  mSendAns = nullptr;
  OfferAnswer();
  CheckOffEncodingCount(1);
  CheckAnsEncodingCount(0);
}
+
// Offerer recvonly / answerer sendrecv: only the answerer sends.
TEST_F(JsepTrackTest, AudioOffRecvonlyAnsSendrecv)
{
  Init(SdpMediaSection::kAudio);
  mSendOff = nullptr;
  OfferAnswer();
  CheckOffEncodingCount(0);
  CheckAnsEncodingCount(1);
}
+
// Video variant: offerer recvonly / answerer sendrecv.
TEST_F(JsepTrackTest, VideoOffRecvonlyAnsSendrecv)
{
  Init(SdpMediaSection::kVideo);
  mSendOff = nullptr;
  OfferAnswer();
  CheckOffEncodingCount(0);
  CheckAnsEncodingCount(1);
}
+
// Offerer sendrecv / answerer sendonly: the answerer does not receive,
// so the offerer ends up with no send encoding while the answerer sends.
TEST_F(JsepTrackTest, AudioOffSendrecvAnsSendonly)
{
  Init(SdpMediaSection::kAudio);
  mRecvAns = nullptr;
  OfferAnswer();
  CheckOffEncodingCount(0);
  CheckAnsEncodingCount(1);
}
+
// Video variant: offerer sendrecv / answerer sendonly.
TEST_F(JsepTrackTest, VideoOffSendrecvAnsSendonly)
{
  Init(SdpMediaSection::kVideo);
  mRecvAns = nullptr;
  OfferAnswer();
  CheckOffEncodingCount(0);
  CheckAnsEncodingCount(1);
}
+
+static JsepTrack::JsConstraints
+MakeConstraints(const std::string& rid, uint32_t maxBitrate)
+{
+ JsepTrack::JsConstraints constraints;
+ constraints.rid = rid;
+ constraints.constraints.maxBr = maxBitrate;
+ return constraints;
+}
+
// Offerer requests two simulcast layers, but the answer never acknowledges
// them (no rid/simulcast added), so each side negotiates a single encoding.
TEST_F(JsepTrackTest, SimulcastRejected)
{
  Init(SdpMediaSection::kVideo);
  std::vector<JsepTrack::JsConstraints> constraints;
  constraints.push_back(MakeConstraints("foo", 40000));
  constraints.push_back(MakeConstraints("bar", 10000));
  mSendOff->SetJsConstraints(constraints);
  OfferAnswer();
  CheckOffEncodingCount(1);
  CheckAnsEncodingCount(1);
}
+
// Answerer requests two simulcast layers, but the offer never solicited
// them, so each side still negotiates a single encoding.
TEST_F(JsepTrackTest, SimulcastPrevented)
{
  Init(SdpMediaSection::kVideo);
  std::vector<JsepTrack::JsConstraints> constraints;
  constraints.push_back(MakeConstraints("foo", 40000));
  constraints.push_back(MakeConstraints("bar", 10000));
  mSendAns->SetJsConstraints(constraints);
  OfferAnswer();
  CheckOffEncodingCount(1);
  CheckAnsEncodingCount(1);
}
+
// Successful simulcast from the offerer: the answer advertises recv-side
// rid/simulcast, so the offerer's send track negotiates both layers with
// their rids and bitrate caps intact.
TEST_F(JsepTrackTest, SimulcastOfferer)
{
  Init(SdpMediaSection::kVideo);
  std::vector<JsepTrack::JsConstraints> constraints;
  constraints.push_back(MakeConstraints("foo", 40000));
  constraints.push_back(MakeConstraints("bar", 10000));
  mSendOff->SetJsConstraints(constraints);
  CreateOffer();
  CreateAnswer();
  // Add simulcast/rid to answer
  JsepTrack::AddToMsection(constraints, sdp::kRecv, &GetAnswer());
  Negotiate();
  ASSERT_TRUE(mSendOff->GetNegotiatedDetails());
  ASSERT_EQ(2U, mSendOff->GetNegotiatedDetails()->GetEncodingCount());
  ASSERT_EQ("foo", mSendOff->GetNegotiatedDetails()->GetEncoding(0).mRid);
  ASSERT_EQ(40000U,
      mSendOff->GetNegotiatedDetails()->GetEncoding(0).mConstraints.maxBr);
  ASSERT_EQ("bar", mSendOff->GetNegotiatedDetails()->GetEncoding(1).mRid);
  ASSERT_EQ(10000U,
      mSendOff->GetNegotiatedDetails()->GetEncoding(1).mConstraints.maxBr);
}
+
// Successful simulcast from the answerer: the offer advertises recv-side
// rid/simulcast, so the answerer's send track negotiates both layers.
TEST_F(JsepTrackTest, SimulcastAnswerer)
{
  Init(SdpMediaSection::kVideo);
  std::vector<JsepTrack::JsConstraints> constraints;
  constraints.push_back(MakeConstraints("foo", 40000));
  constraints.push_back(MakeConstraints("bar", 10000));
  mSendAns->SetJsConstraints(constraints);
  CreateOffer();
  // Add simulcast/rid to offer
  JsepTrack::AddToMsection(constraints, sdp::kRecv, &GetOffer());
  CreateAnswer();
  Negotiate();
  ASSERT_TRUE(mSendAns->GetNegotiatedDetails());
  ASSERT_EQ(2U, mSendAns->GetNegotiatedDetails()->GetEncodingCount());
  ASSERT_EQ("foo", mSendAns->GetNegotiatedDetails()->GetEncoding(0).mRid);
  ASSERT_EQ(40000U,
      mSendAns->GetNegotiatedDetails()->GetEncoding(0).mConstraints.maxBr);
  ASSERT_EQ("bar", mSendAns->GetNegotiatedDetails()->GetEncoding(1).mRid);
  ASSERT_EQ(10000U,
      mSendAns->GetNegotiatedDetails()->GetEncoding(1).mConstraints.maxBr);
}
+
// Asserts that every "opus" codec negotiated on |track|'s first encoding
// reports |expectedRate| as mMaxPlaybackRate. Note: despite its name,
// |copy| is a reference to |track|, not a copy; the extra braces keep the
// locals out of the caller's scope.
#define VERIFY_OPUS_MAX_PLAYBACK_RATE(track, expectedRate) \
{ \
  JsepTrack& copy(track); \
  ASSERT_TRUE(copy.GetNegotiatedDetails()); \
  ASSERT_TRUE(copy.GetNegotiatedDetails()->GetEncodingCount()); \
  for (auto codec : copy.GetNegotiatedDetails()->GetEncoding(0).GetCodecs()) {\
    if (codec->mName == "opus") { \
      JsepAudioCodecDescription* audioCodec = \
        static_cast<JsepAudioCodecDescription*>(codec); \
      ASSERT_EQ((expectedRate), audioCodec->mMaxPlaybackRate); \
    } \
  }; \
}
+
// Asserts that every "opus" codec negotiated on |track|'s first encoding
// reports |expected| for mForceMono. As above, |copy| is a reference,
// not a copy.
#define VERIFY_OPUS_FORCE_MONO(track, expected) \
{ \
  JsepTrack& copy(track); \
  ASSERT_TRUE(copy.GetNegotiatedDetails()); \
  ASSERT_TRUE(copy.GetNegotiatedDetails()->GetEncodingCount()); \
  for (auto codec : copy.GetNegotiatedDetails()->GetEncoding(0).GetCodecs()) {\
    if (codec->mName == "opus") { \
      JsepAudioCodecDescription* audioCodec = \
        static_cast<JsepAudioCodecDescription*>(codec); \
      /* gtest has some compiler warnings when using ASSERT_EQ with booleans. */ \
      ASSERT_EQ((int)(expected), (int)audioCodec->mForceMono); \
    } \
  }; \
}
+
// With untouched codec lists, both senders adopt the spec default opus
// maxplaybackrate, while the receive sides report 0 / no forced mono.
TEST_F(JsepTrackTest, DefaultOpusParameters)
{
  Init(SdpMediaSection::kAudio);
  OfferAnswer();

  VERIFY_OPUS_MAX_PLAYBACK_RATE(*mSendOff,
      SdpFmtpAttributeList::OpusParameters::kDefaultMaxPlaybackRate);
  VERIFY_OPUS_MAX_PLAYBACK_RATE(*mSendAns,
      SdpFmtpAttributeList::OpusParameters::kDefaultMaxPlaybackRate);
  VERIFY_OPUS_MAX_PLAYBACK_RATE(*mRecvOff, 0U);
  VERIFY_OPUS_FORCE_MONO(*mRecvOff, false);
  VERIFY_OPUS_MAX_PLAYBACK_RATE(*mRecvAns, 0U);
  VERIFY_OPUS_FORCE_MONO(*mRecvAns, false);
}
+
// The answerer advertises opus maxplaybackrate=16000 and stereo=0: the
// offerer's send side must honor both, while the answerer's send side
// (driven by the untouched offer) keeps the defaults.
TEST_F(JsepTrackTest, NonDefaultOpusParameters)
{
  InitCodecs();
  for (auto& codec : mAnsCodecs.values) {
    if (codec->mName == "opus") {
      JsepAudioCodecDescription* audioCodec =
        static_cast<JsepAudioCodecDescription*>(codec);
      audioCodec->mMaxPlaybackRate = 16000;
      audioCodec->mForceMono = true;
    }
  }
  InitTracks(SdpMediaSection::kAudio);
  InitSdp(SdpMediaSection::kAudio);
  OfferAnswer();

  VERIFY_OPUS_MAX_PLAYBACK_RATE(*mSendOff, 16000U);
  VERIFY_OPUS_FORCE_MONO(*mSendOff, true);
  VERIFY_OPUS_MAX_PLAYBACK_RATE(*mSendAns,
      SdpFmtpAttributeList::OpusParameters::kDefaultMaxPlaybackRate);
  VERIFY_OPUS_FORCE_MONO(*mSendAns, false);
  VERIFY_OPUS_MAX_PLAYBACK_RATE(*mRecvOff, 0U);
  VERIFY_OPUS_FORCE_MONO(*mRecvOff, false);
  VERIFY_OPUS_MAX_PLAYBACK_RATE(*mRecvAns, 16000U);
  VERIFY_OPUS_FORCE_MONO(*mRecvAns, true);
}
+
+} // namespace mozilla
+
int
main(int argc, char** argv)
{
  // Test-suite entry point: bring up XPCOM before gtest runs so component
  // initialization doesn't spam the log from inside the tests.
  // Prevents some log spew
  ScopedXPCOM xpcom("jsep_track_unittest");

  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
+
diff --git a/media/webrtc/signaling/test/mediaconduit_unittests.cpp b/media/webrtc/signaling/test/mediaconduit_unittests.cpp
new file mode 100644
index 000000000..f0cf95a47
--- /dev/null
+++ b/media/webrtc/signaling/test/mediaconduit_unittests.cpp
@@ -0,0 +1,1091 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <iostream>
+#include <string>
+#include <fstream>
+#include <unistd.h>
+#include <vector>
+#include <math.h>
+
+using namespace std;
+
+#include "mozilla/SyncRunnable.h"
+#include "mozilla/UniquePtr.h"
+#include <MediaConduitInterface.h>
+#include "GmpVideoCodec.h"
+#include "nsIEventTarget.h"
+#include "FakeMediaStreamsImpl.h"
+#include "FakeLogging.h"
+#include "nsThreadUtils.h"
+#include "runnable_utils.h"
+#include "signaling/src/common/EncodingConstraints.h"
+
+#include "FakeIPC.h"
+#include "FakeIPC.cpp"
+
+#define GTEST_HAS_RTTI 0
+#include "gtest/gtest.h"
+#include "gtest_utils.h"
+
nsCOMPtr<nsIThread> gMainThread;   // thread conduits are created/used on
nsCOMPtr<nsIThread> gGtestThread;  // thread the gtest suite runs on
// Presumably flipped when the suite finishes; not set in this chunk —
// confirm in main() below.
bool gTestsComplete = false;

#include "mtransport_test_utils.h"
MtransportTestUtils *test_utils;

//Video Frame Color: every byte of the test frame is set to this value,
//and DummyVideoTarget verifies rendered frames against it.
const int COLOR = 0x80; //Gray
+
//MWC RNG of George Marsaglia
//taken from xiph.org
// State words, seeded with Marsaglia's canonical values. Previously these
// were zero-initialized, which is a degenerate state for MWC: the generator
// returned 0 forever, so GenerateMusic's "random" component was silent.
// Unsigned arithmetic also avoids signed-overflow UB on the multiplies and
// implementation-defined right shifts of negative values.
static uint32_t Rz = 362436069, Rw = 521288629;
static inline int32_t fast_rand(void)
{
  // Two 16-bit multiply-with-carry generators, concatenated into 32 bits.
  Rz = 36969 * (Rz & 65535) + (Rz >> 16);
  Rw = 18000 * (Rw & 65535) + (Rw >> 16);
  return (int32_t)((Rz << 16) + Rw);
}
+
/**
 * Global structure to store video test results.
 */
struct VideoTestStats
{
  int numRawFramesInserted;          // frames pushed into the send conduit
  int numFramesRenderedSuccessfully; // rendered frames that verified OK
  int numFramesRenderedWrongly;      // rendered frames that failed verification
};

VideoTestStats vidStatsGlobal={0,0,0};
+
/**
 * A Dummy Video Conduit Tester.
 * The test-case inserts a 640*480 grey image every 1000/rate milliseconds
 * into the video-conduit for encoding and transporting.
 */

class VideoSendAndReceive
{
public:
  VideoSendAndReceive():width(640),
                        height(480),
                        rate(30)
  {
  }

  ~VideoSendAndReceive()
  {
  }

  // NOTE: must be called before Init(); Init() derives the frame buffer
  // size from width/height and SetDimensions does not re-derive it.
  void SetDimensions(int w, int h)
  {
    width = w;
    height = h;
  }
  // Frames per second used to pace GenerateAndReadSamples().
  void SetRate(int r) {
    rate = r;
  }
  // Allocates an I420 frame (w*h*3/2 bytes) filled with COLOR and binds
  // the session this tester will push frames into.
  void Init(RefPtr<mozilla::VideoSessionConduit> aSession)
  {
    mSession = aSession;
    mLen = ((width * height) * 3 / 2);
    mFrame = mozilla::MakeUnique<uint8_t[]>(mLen);
    memset(mFrame.get(), COLOR, mLen);
    numFrames = 121;
  }

  // Pushes the grey frame into the conduit at ~|rate| fps. The do/while
  // decrements then tests >= 0, so numFrames+1 (122) frames are sent.
  void GenerateAndReadSamples()
  {
    do
    {
      mSession->SendVideoFrame(reinterpret_cast<unsigned char*>(mFrame.get()),
                               mLen,
                               width,
                               height,
                               mozilla::kVideoI420,
                               0);
      PR_Sleep(PR_MillisecondsToInterval(1000/rate));
      vidStatsGlobal.numRawFramesInserted++;
      numFrames--;
    } while(numFrames >= 0);
  }

private:
RefPtr<mozilla::VideoSessionConduit> mSession; // conduit under test
mozilla::UniquePtr<uint8_t[]> mFrame;          // solid-COLOR I420 frame
int mLen;                                      // frame size in bytes
int width, height;
int rate;                                      // frames per second
int numFrames;                                 // countdown set by Init()
};
+
+
+
/**
 * A Dummy AudioConduit Tester
 * The test reads PCM samples of a standard test file and
 * passes them to the audio-conduit for encoding, RTPfication and
 * decoding every 10 milliseconds.
 * The decoded samples are read off the conduit for writing
 * into an output audio file in PCM format.
 */
class AudioSendAndReceive
{
public:
  static const unsigned int PLAYOUT_SAMPLE_FREQUENCY; //default is 16000
  static const unsigned int PLAYOUT_SAMPLE_LENGTH; //default is 160000

  AudioSendAndReceive()
  {
  }

  ~AudioSendAndReceive()
  {
  }

  // Binds the sending session, the session the packets loop back to, and
  // the paths of the generated input WAV and the recorded output WAV.
  void Init(RefPtr<mozilla::AudioSessionConduit> aSession,
            RefPtr<mozilla::AudioSessionConduit> aOtherSession,
            std::string fileIn, std::string fileOut)
  {

    mSession = aSession;
    mOtherSession = aOtherSession;
    iFile = fileIn;
    oFile = fileOut;
  }

  //Kick start the test
  void GenerateAndReadSamples();

private:

  RefPtr<mozilla::AudioSessionConduit> mSession;       // sending side
  RefPtr<mozilla::AudioSessionConduit> mOtherSession;  // receiving side
  std::string iFile;  // generated input WAV path
  std::string oFile;  // recorded output WAV path

  int WriteWaveHeader(int rate, int channels, FILE* outFile);
  int FinishWaveHeader(FILE* outFile);
  void GenerateMusic(int16_t* buf, int len);
};

const unsigned int AudioSendAndReceive::PLAYOUT_SAMPLE_FREQUENCY = 16000;
const unsigned int AudioSendAndReceive::PLAYOUT_SAMPLE_LENGTH = 160000;
+
+int AudioSendAndReceive::WriteWaveHeader(int rate, int channels, FILE* outFile)
+{
+ //Hardcoded for 16 bit samples
+ unsigned char header[] = {
+ // File header
+ 0x52, 0x49, 0x46, 0x46, // 'RIFF'
+ 0x00, 0x00, 0x00, 0x00, // chunk size
+ 0x57, 0x41, 0x56, 0x45, // 'WAVE'
+ // fmt chunk. We always write 16-bit samples.
+ 0x66, 0x6d, 0x74, 0x20, // 'fmt '
+ 0x10, 0x00, 0x00, 0x00, // chunk size
+ 0x01, 0x00, // WAVE_FORMAT_PCM
+ 0xFF, 0xFF, // channels
+ 0xFF, 0xFF, 0xFF, 0xFF, // sample rate
+ 0x00, 0x00, 0x00, 0x00, // data rate
+ 0xFF, 0xFF, // frame size in bytes
+ 0x10, 0x00, // bits per sample
+ // data chunk
+ 0x64, 0x61, 0x74, 0x61, // 'data'
+ 0xFE, 0xFF, 0xFF, 0x7F // chunk size
+ };
+
+#define set_uint16le(buffer, value) \
+ (buffer)[0] = (value) & 0xff; \
+ (buffer)[1] = (value) >> 8;
+#define set_uint32le(buffer, value) \
+ set_uint16le( (buffer), (value) & 0xffff ); \
+ set_uint16le( (buffer) + 2, (value) >> 16 );
+
+ // set dynamic header fields
+ set_uint16le(header + 22, channels);
+ set_uint32le(header + 24, rate);
+ set_uint16le(header + 32, channels*2);
+
+ size_t written = fwrite(header, 1, sizeof(header), outFile);
+ if (written != sizeof(header)) {
+ cerr << "Writing WAV header failed" << endl;
+ return -1;
+ }
+
+ return 0;
+}
+
+// Update the WAVE file header with the written length
+int AudioSendAndReceive::FinishWaveHeader(FILE* outFile)
+{
+ // Measure how much data we've written
+ long end = ftell(outFile);
+ if (end < 16) {
+ cerr << "Couldn't get output file length" << endl;
+ return (end < 0) ? end : -1;
+ }
+
+ // Update the header
+ unsigned char size[4];
+ int err = fseek(outFile, 40, SEEK_SET);
+ if (err < 0) {
+ cerr << "Couldn't seek to WAV file header." << endl;
+ return err;
+ }
+ set_uint32le(size, (end - 44) & 0xffffffff);
+ size_t written = fwrite(size, 1, sizeof(size), outFile);
+ if (written != sizeof(size)) {
+ cerr << "Couldn't write data size to WAV header" << endl;
+ return -1;
+ }
+
+ // Return to the end
+ err = fseek(outFile, 0, SEEK_END);
+ if (err < 0) {
+ cerr << "Couldn't seek to WAV file end." << endl;
+ return err;
+ }
+
+ return 0;
+}
+
//Code from xiph.org to generate music of predefined length.
//Fixed-point noise-shaped tone generator: 60ms of silence followed by a
//melodic pattern (derived from bit-twiddling the sample counter j) plus
//fast_rand() noise, low-pass filtered and clamped to 16-bit range.
//(|buf| is declared as int16_t* in the class; short is the same type here.)
void AudioSendAndReceive::GenerateMusic(short* buf, int len)
{
  cerr <<" Generating Input Music " << endl;
  int32_t a1,a2,b1,b2;   // per-channel one-pole filter state
  int32_t c1,c2,d1,d2;   // per-channel smoothing filter state
  int32_t i,j;
  a1=b1=a2=b2=0;
  c1=c2=d1=d2=0;
  j=0;
  /*60ms silence */
  for(i=0;i<2880;i++)
  {
    buf[i*2]=buf[(i*2)+1]=0;
  }
  for(i=2880;i<len-1;i+=2)
  {
    int32_t r;
    int32_t v1,v2;
    // Base "melody" value from the sample counter, identical on both
    // interleaved channels, then perturbed with RNG noise per channel.
    v1=v2=(((j*((j>>12)^((j>>10|j>>12)&26&j>>7)))&128)+128)<<15;
    r=fast_rand();v1+=r&65535;v1-=r>>16;
    r=fast_rand();v2+=r&65535;v2-=r>>16;
    b1=v1-a1+((b1*61+32)>>6);a1=v1;
    b2=v2-a2+((b2*61+32)>>6);a2=v2;
    c1=(30*(c1+b1+d1)+32)>>6;d1=b1;
    c2=(30*(c2+b2+d2)+32)>>6;d2=b2;
    v1=(c1+128)>>8;
    v2=(c2+128)>>8;
    // Clamp to int16 range before storing.
    buf[i]=v1>32767?32767:(v1<-32768?-32768:v1);
    buf[i+1]=v2>32767?32767:(v2<-32768?-32768:v2);
    if(i%6==0)j++;
  }
  cerr << "Generating Input Music Done " << endl;
}
+
+//Hardcoded for 16 bit samples for now
+void AudioSendAndReceive::GenerateAndReadSamples()
+{
+ auto audioInput = mozilla::MakeUnique<int16_t []>(PLAYOUT_SAMPLE_LENGTH);
+ auto audioOutput = mozilla::MakeUnique<int16_t []>(PLAYOUT_SAMPLE_LENGTH);
+ short* inbuf;
+ int sampleLengthDecoded = 0;
+ unsigned int SAMPLES = (PLAYOUT_SAMPLE_FREQUENCY * 10); //10 seconds
+ int CHANNELS = 1; //mono audio
+ int sampleLengthInBytes = sizeof(int16_t) * PLAYOUT_SAMPLE_LENGTH;
+ //generated audio buffer
+ inbuf = (short *)moz_xmalloc(sizeof(short)*SAMPLES*CHANNELS);
+ memset(audioInput.get(),0,sampleLengthInBytes);
+ memset(audioOutput.get(),0,sampleLengthInBytes);
+ MOZ_ASSERT(SAMPLES <= PLAYOUT_SAMPLE_LENGTH);
+
+ FILE* inFile = fopen( iFile.c_str(), "wb+");
+ if(!inFile) {
+ cerr << "Input File Creation Failed " << endl;
+ free(inbuf);
+ return;
+ }
+
+ FILE* outFile = fopen( oFile.c_str(), "wb+");
+ if(!outFile) {
+ cerr << "Output File Creation Failed " << endl;
+ free(inbuf);
+ fclose(inFile);
+ return;
+ }
+
+ //Create input file with the music
+ WriteWaveHeader(PLAYOUT_SAMPLE_FREQUENCY, 1, inFile);
+ GenerateMusic(inbuf, SAMPLES);
+ fwrite(inbuf,1,SAMPLES*sizeof(inbuf[0])*CHANNELS,inFile);
+ FinishWaveHeader(inFile);
+ fclose(inFile);
+
+ WriteWaveHeader(PLAYOUT_SAMPLE_FREQUENCY, 1, outFile);
+ unsigned int numSamplesReadFromInput = 0;
+ do
+ {
+ if(!memcpy(audioInput.get(), inbuf, sampleLengthInBytes))
+ {
+ free(inbuf);
+ fclose(outFile);
+ return;
+ }
+
+ numSamplesReadFromInput += PLAYOUT_SAMPLE_LENGTH;
+ inbuf += PLAYOUT_SAMPLE_LENGTH;
+
+ mSession->SendAudioFrame(audioInput.get(),
+ PLAYOUT_SAMPLE_LENGTH,
+ PLAYOUT_SAMPLE_FREQUENCY,10);
+
+ PR_Sleep(PR_MillisecondsToInterval(10));
+ mOtherSession->GetAudioFrame(audioOutput.get(), PLAYOUT_SAMPLE_FREQUENCY,
+ 10, sampleLengthDecoded);
+ if(sampleLengthDecoded == 0)
+ {
+ cerr << " Zero length Sample " << endl;
+ }
+
+ int wrote_ = fwrite (audioOutput.get(), 1 , sampleLengthInBytes, outFile);
+ if(wrote_ != sampleLengthInBytes)
+ {
+ cerr << "Couldn't Write " << sampleLengthInBytes << "bytes" << endl;
+ break;
+ }
+ }while(numSamplesReadFromInput < SAMPLES);
+
+ FinishWaveHeader(outFile);
+ free(inbuf);
+ fclose(outFile);
+}
+
+/**
+ * Dummy Video Target for the conduit
+ * This class acts as renderer attached to the video conuit
+ * As of today we just verify if the frames rendered are exactly
+ * the same as frame inserted at the first place
+ */
+class DummyVideoTarget: public mozilla::VideoRenderer
+{
+public:
+ DummyVideoTarget()
+ {
+ }
+
+ virtual ~DummyVideoTarget()
+ {
+ }
+
+
+ void RenderVideoFrame(const unsigned char* buffer,
+ size_t buffer_size,
+ uint32_t y_stride,
+ uint32_t cbcr_stride,
+ uint32_t time_stamp,
+ int64_t render_time,
+ const mozilla::ImageHandle& handle) override
+ {
+ RenderVideoFrame(buffer, buffer_size, time_stamp, render_time, handle);
+ }
+
+ void RenderVideoFrame(const unsigned char* buffer,
+ size_t buffer_size,
+ uint32_t time_stamp,
+ int64_t render_time,
+ const mozilla::ImageHandle& handle) override
+ {
+ //write the frame to the file
+ if(VerifyFrame(buffer, buffer_size) == 0)
+ {
+ vidStatsGlobal.numFramesRenderedSuccessfully++;
+ } else
+ {
+ vidStatsGlobal.numFramesRenderedWrongly++;
+ }
+ }
+
+ void FrameSizeChange(unsigned int, unsigned int, unsigned int) override
+ {
+ //do nothing
+ }
+
+ //This is hardcoded to check if the contents of frame is COLOR
+ // as we set while sending.
+ int VerifyFrame(const unsigned char* buffer, unsigned int buffer_size)
+ {
+ int good = 0;
+ for(int i=0; i < (int) buffer_size; i++)
+ {
+ if(buffer[i] == COLOR)
+ {
+ ++good;
+ }
+ else
+ {
+ --good;
+ }
+ }
+ return 0;
+ }
+
+};
+
/**
 * Webrtc Audio and Video External Transport Class
 * The functions in this class will be invoked by the conduit
 * when it has an RTP/RTCP frame to transmit.
 * For every RTP/RTCP frame we receive, we pass it back
 * to the other conduit for eventual decoding and rendering.
 */
class WebrtcMediaTransport : public mozilla::TransportInterface
{
public:
  WebrtcMediaTransport():numPkts(0),
                         mAudio(false),
                         mVideo(false)
  {
  }

  ~WebrtcMediaTransport()
  {
  }

  // Loops an outgoing RTP packet straight back into the peer session.
  virtual nsresult SendRtpPacket(const void* data, int len)
  {
    ++numPkts;
    if(mAudio)
    {
      mOtherAudioSession->ReceivedRTPPacket(data,len);
    } else
    {
      mOtherVideoSession->ReceivedRTPPacket(data,len);
    }
    return NS_OK;
  }

  // Loops an outgoing RTCP packet back into the peer session.
  // NB: unlike SendRtpPacket, this does not bump numPkts, so the counter
  // only tracks RTP.
  virtual nsresult SendRtcpPacket(const void* data, int len)
  {
    if(mAudio)
    {
      mOtherAudioSession->ReceivedRTCPPacket(data,len);
    } else
    {
      mOtherVideoSession->ReceivedRTCPPacket(data,len);
    }
    return NS_OK;
  }

  //Treat this object as Audio Transport
  void SetAudioSession(RefPtr<mozilla::AudioSessionConduit> aSession,
                       RefPtr<mozilla::AudioSessionConduit>
                       aOtherSession)
  {
    mAudioSession = aSession;
    mOtherAudioSession = aOtherSession;
    mAudio = true;
  }

  // Treat this object as Video Transport
  void SetVideoSession(RefPtr<mozilla::VideoSessionConduit> aSession,
                       RefPtr<mozilla::VideoSessionConduit>
                       aOtherSession)
  {
    mVideoSession = aSession;
    mOtherVideoSession = aOtherSession;
    mVideo = true;
  }

private:
  RefPtr<mozilla::AudioSessionConduit> mAudioSession;      // local sender
  RefPtr<mozilla::VideoSessionConduit> mVideoSession;      // local sender
  RefPtr<mozilla::VideoSessionConduit> mOtherVideoSession; // loopback target
  RefPtr<mozilla::AudioSessionConduit> mOtherAudioSession; // loopback target
  int numPkts;          // RTP packets forwarded (RTCP not counted)
  bool mAudio, mVideo;  // which SetXxxSession() configured this transport
};
+
+
+namespace {
+
+class TransportConduitTest : public ::testing::Test
+{
+ public:
+
+ TransportConduitTest()
+ {
+ //input and output file names
+ iAudiofilename = "input.wav";
+ oAudiofilename = "recorded.wav";
+ }
+
+ ~TransportConduitTest()
+ {
+ mozilla::SyncRunnable::DispatchToThread(gMainThread,
+ mozilla::WrapRunnable(
+ this,
+ &TransportConduitTest::SelfDestruct));
+ }
+
+ void SelfDestruct() {
+ mAudioSession = nullptr;
+ mAudioSession2 = nullptr;
+ mAudioTransport = nullptr;
+
+ mVideoSession = nullptr;
+ mVideoSession2 = nullptr;
+ mVideoRenderer = nullptr;
+ mVideoTransport = nullptr;
+ }
+
+ //1. Dump audio samples to dummy external transport
+ void TestDummyAudioAndTransport()
+ {
+ //get pointer to AudioSessionConduit
+ int err=0;
+ mozilla::SyncRunnable::DispatchToThread(gMainThread,
+ WrapRunnableNMRet(&mAudioSession,
+ &mozilla::AudioSessionConduit::Create));
+ if( !mAudioSession )
+ ASSERT_NE(mAudioSession, (void*)nullptr);
+
+ mozilla::SyncRunnable::DispatchToThread(gMainThread,
+ WrapRunnableNMRet(&mAudioSession2,
+ &mozilla::AudioSessionConduit::Create));
+ if( !mAudioSession2 )
+ ASSERT_NE(mAudioSession2, (void*)nullptr);
+
+ WebrtcMediaTransport* xport = new WebrtcMediaTransport();
+ ASSERT_NE(xport, (void*)nullptr);
+ xport->SetAudioSession(mAudioSession, mAudioSession2);
+ mAudioTransport = xport;
+
+ // attach the transport to audio-conduit
+ err = mAudioSession->SetTransmitterTransport(mAudioTransport);
+ ASSERT_EQ(mozilla::kMediaConduitNoError, err);
+ err = mAudioSession2->SetReceiverTransport(mAudioTransport);
+ ASSERT_EQ(mozilla::kMediaConduitNoError, err);
+
+ //configure send and recv codecs on the audio-conduit
+ //mozilla::AudioCodecConfig cinst1(124, "PCMU", 8000, 80, 1, 64000, false);
+ mozilla::AudioCodecConfig cinst1(124, "opus", 48000, 960, 1, 64000, false);
+ mozilla::AudioCodecConfig cinst2(125, "L16", 16000, 320, 1, 256000, false);
+
+ std::vector<mozilla::AudioCodecConfig*> rcvCodecList;
+ rcvCodecList.push_back(&cinst1);
+ rcvCodecList.push_back(&cinst2);
+
+ err = mAudioSession->ConfigureSendMediaCodec(&cinst1);
+ ASSERT_EQ(mozilla::kMediaConduitNoError, err);
+ err = mAudioSession->StartTransmitting();
+ ASSERT_EQ(mozilla::kMediaConduitNoError, err);
+ err = mAudioSession->ConfigureRecvMediaCodecs(rcvCodecList);
+ ASSERT_EQ(mozilla::kMediaConduitNoError, err);
+
+ err = mAudioSession2->ConfigureSendMediaCodec(&cinst1);
+ ASSERT_EQ(mozilla::kMediaConduitNoError, err);
+ err = mAudioSession2->StartTransmitting();
+ ASSERT_EQ(mozilla::kMediaConduitNoError, err);
+ err = mAudioSession2->ConfigureRecvMediaCodecs(rcvCodecList);
+ ASSERT_EQ(mozilla::kMediaConduitNoError, err);
+
+ //start generating samples
+ audioTester.Init(mAudioSession,mAudioSession2, iAudiofilename,oAudiofilename);
+ cerr << " ******************************************************** " << endl;
+ cerr << " Generating Audio Samples " << endl;
+ cerr << " ******************************************************** " << endl;
+ PR_Sleep(PR_SecondsToInterval(2));
+ audioTester.GenerateAndReadSamples();
+ PR_Sleep(PR_SecondsToInterval(2));
+ cerr << " ******************************************************** " << endl;
+ cerr << " Input Audio File " << iAudiofilename << endl;
+ cerr << " Output Audio File " << oAudiofilename << endl;
+ cerr << " ******************************************************** " << endl;
+ }
+
+ //2. Dump audio samples to dummy external transport
+ void TestDummyVideoAndTransport(bool send_vp8 = true, const char *source_file = nullptr)
+ {
+ int err = 0;
+ //get pointer to VideoSessionConduit
+ mozilla::SyncRunnable::DispatchToThread(gMainThread,
+ WrapRunnableNMRet(&mVideoSession,
+ &mozilla::VideoSessionConduit::Create));
+ if( !mVideoSession )
+ ASSERT_NE(mVideoSession, (void*)nullptr);
+
+ // This session is for other one
+ mozilla::SyncRunnable::DispatchToThread(gMainThread,
+ WrapRunnableNMRet(&mVideoSession2,
+ &mozilla::VideoSessionConduit::Create));
+ if( !mVideoSession2 )
+ ASSERT_NE(mVideoSession2,(void*)nullptr);
+
+ if (!send_vp8) {
+ SetGmpCodecs();
+ }
+
+ mVideoRenderer = new DummyVideoTarget();
+ ASSERT_NE(mVideoRenderer, (void*)nullptr);
+
+ WebrtcMediaTransport* xport = new WebrtcMediaTransport();
+ ASSERT_NE(xport, (void*)nullptr);
+ xport->SetVideoSession(mVideoSession,mVideoSession2);
+ mVideoTransport = xport;
+
+ // attach the transport and renderer to video-conduit
+ err = mVideoSession2->AttachRenderer(mVideoRenderer);
+ ASSERT_EQ(mozilla::kMediaConduitNoError, err);
+ err = mVideoSession->SetTransmitterTransport(mVideoTransport);
+ ASSERT_EQ(mozilla::kMediaConduitNoError, err);
+ err = mVideoSession2->SetReceiverTransport(mVideoTransport);
+ ASSERT_EQ(mozilla::kMediaConduitNoError, err);
+
+ mozilla::EncodingConstraints constraints;
+ //configure send and recv codecs on theconduit
+ mozilla::VideoCodecConfig cinst1(120, "VP8", constraints);
+ mozilla::VideoCodecConfig cinst2(124, "I420", constraints);
+
+
+ std::vector<mozilla::VideoCodecConfig* > rcvCodecList;
+ rcvCodecList.push_back(&cinst1);
+ rcvCodecList.push_back(&cinst2);
+
+ err = mVideoSession->ConfigureSendMediaCodec(
+ send_vp8 ? &cinst1 : &cinst2);
+
+ ASSERT_EQ(mozilla::kMediaConduitNoError, err);
+ err = mVideoSession->StartTransmitting();
+ ASSERT_EQ(mozilla::kMediaConduitNoError, err);
+
+ err = mVideoSession2->ConfigureSendMediaCodec(
+ send_vp8 ? &cinst1 : &cinst2);
+ err = mVideoSession2->StartTransmitting();
+ ASSERT_EQ(mozilla::kMediaConduitNoError, err);
+
+ ASSERT_EQ(mozilla::kMediaConduitNoError, err);
+ err = mVideoSession2->ConfigureRecvMediaCodecs(rcvCodecList);
+ ASSERT_EQ(mozilla::kMediaConduitNoError, err);
+
+ //start generating samples
+ cerr << " *************************************************" << endl;
+ cerr << " Starting the Video Sample Generation " << endl;
+ cerr << " *************************************************" << endl;
+ PR_Sleep(PR_SecondsToInterval(2));
+ videoTester.Init(mVideoSession);
+ videoTester.GenerateAndReadSamples();
+ PR_Sleep(PR_SecondsToInterval(2));
+
+ cerr << " **************************************************" << endl;
+ cerr << " Done With The Testing " << endl;
+ cerr << " VIDEO TEST STATS " << endl;
+ cerr << " Num Raw Frames Inserted: "<<
+ vidStatsGlobal.numRawFramesInserted << endl;
+ cerr << " Num Frames Successfully Rendered: "<<
+ vidStatsGlobal.numFramesRenderedSuccessfully << endl;
+ cerr << " Num Frames Wrongly Rendered: "<<
+ vidStatsGlobal.numFramesRenderedWrongly << endl;
+
+ cerr << " Done With The Testing " << endl;
+
+ cerr << " **************************************************" << endl;
+ ASSERT_EQ(0, vidStatsGlobal.numFramesRenderedWrongly);
+ if (send_vp8) {
+ ASSERT_EQ(vidStatsGlobal.numRawFramesInserted,
+ vidStatsGlobal.numFramesRenderedSuccessfully);
+ }
+ else {
+ // Allow some fudge because there seems to be some buffering.
+ // TODO(ekr@rtfm.com): Fix this.
+ ASSERT_GE(vidStatsGlobal.numRawFramesInserted,
+ vidStatsGlobal.numFramesRenderedSuccessfully);
+ ASSERT_LE(vidStatsGlobal.numRawFramesInserted,
+ vidStatsGlobal.numFramesRenderedSuccessfully + 2);
+ }
+ }
+
+ void TestVideoConduitCodecAPI()
+ {
+ int err = 0;
+ RefPtr<mozilla::VideoSessionConduit> videoSession;
+ //get pointer to VideoSessionConduit
+ mozilla::SyncRunnable::DispatchToThread(gMainThread,
+ WrapRunnableNMRet(&videoSession,
+ &mozilla::VideoSessionConduit::Create));
+ if( !videoSession )
+ ASSERT_NE(videoSession, (void*)nullptr);
+
+ //Test Configure Recv Codec APIS
+ cerr << " *************************************************" << endl;
+ cerr << " Test Receive Codec Configuration API Now " << endl;
+ cerr << " *************************************************" << endl;
+
+ std::vector<mozilla::VideoCodecConfig* > rcvCodecList;
+
+ //Same APIs
+ cerr << " *************************************************" << endl;
+ cerr << " 1. Same Codec (VP8) Repeated Twice " << endl;
+ cerr << " *************************************************" << endl;
+
+ mozilla::EncodingConstraints constraints;
+ mozilla::VideoCodecConfig cinst1(120, "VP8", constraints);
+ mozilla::VideoCodecConfig cinst2(120, "VP8", constraints);
+ rcvCodecList.push_back(&cinst1);
+ rcvCodecList.push_back(&cinst2);
+ err = videoSession->ConfigureRecvMediaCodecs(rcvCodecList);
+ EXPECT_NE(err,mozilla::kMediaConduitNoError);
+ rcvCodecList.pop_back();
+ rcvCodecList.pop_back();
+
+
+ PR_Sleep(PR_SecondsToInterval(2));
+ cerr << " *************************************************" << endl;
+ cerr << " 2. Codec With Invalid Payload Names " << endl;
+ cerr << " *************************************************" << endl;
+ cerr << " Setting payload 1 with name: I4201234tttttthhhyyyy89087987y76t567r7756765rr6u6676" << endl;
+ cerr << " Setting payload 2 with name of zero length" << endl;
+
+ mozilla::VideoCodecConfig cinst3(124, "I4201234tttttthhhyyyy89087987y76t567r7756765rr6u6676", constraints);
+ mozilla::VideoCodecConfig cinst4(124, "", constraints);
+
+ rcvCodecList.push_back(&cinst3);
+ rcvCodecList.push_back(&cinst4);
+
+ err = videoSession->ConfigureRecvMediaCodecs(rcvCodecList);
+ EXPECT_TRUE(err != mozilla::kMediaConduitNoError);
+ rcvCodecList.pop_back();
+ rcvCodecList.pop_back();
+
+
+ PR_Sleep(PR_SecondsToInterval(2));
+ cerr << " *************************************************" << endl;
+ cerr << " 3. Null Codec Parameter " << endl;
+ cerr << " *************************************************" << endl;
+
+ rcvCodecList.push_back(0);
+
+ err = videoSession->ConfigureRecvMediaCodecs(rcvCodecList);
+ EXPECT_TRUE(err != mozilla::kMediaConduitNoError);
+ rcvCodecList.pop_back();
+
+ cerr << " *************************************************" << endl;
+ cerr << " Test Send Codec Configuration API Now " << endl;
+ cerr << " *************************************************" << endl;
+
+ cerr << " *************************************************" << endl;
+ cerr << " 1. Same Codec (VP8) Repeated Twice " << endl;
+ cerr << " *************************************************" << endl;
+
+
+ err = videoSession->ConfigureSendMediaCodec(&cinst1);
+ EXPECT_EQ(mozilla::kMediaConduitNoError, err);
+ err = videoSession->StartTransmitting();
+ ASSERT_EQ(mozilla::kMediaConduitNoError, err);
+ err = videoSession->ConfigureSendMediaCodec(&cinst1);
+ EXPECT_EQ(mozilla::kMediaConduitCodecInUse, err);
+ err = videoSession->StartTransmitting();
+ ASSERT_EQ(mozilla::kMediaConduitNoError, err);
+
+
+ cerr << " *************************************************" << endl;
+ cerr << " 2. Codec With Invalid Payload Names " << endl;
+ cerr << " *************************************************" << endl;
+ cerr << " Setting payload with name: I4201234tttttthhhyyyy89087987y76t567r7756765rr6u6676" << endl;
+
+ err = videoSession->ConfigureSendMediaCodec(&cinst3);
+ EXPECT_TRUE(err != mozilla::kMediaConduitNoError);
+
+ cerr << " *************************************************" << endl;
+ cerr << " 3. Null Codec Parameter " << endl;
+ cerr << " *************************************************" << endl;
+
+ err = videoSession->ConfigureSendMediaCodec(nullptr);
+ EXPECT_TRUE(err != mozilla::kMediaConduitNoError);
+
+ mozilla::SyncRunnable::DispatchToThread(gMainThread,
+ WrapRunnable(
+ videoSession.forget().take(),
+ &mozilla::VideoSessionConduit::Release));
+ }
+
+ void DumpMaxFs(int orig_width, int orig_height, int max_fs,
+ int new_width, int new_height)
+ {
+ cerr << "Applying max_fs=" << max_fs << " to input resolution " <<
+ orig_width << "x" << orig_height << endl;
+ cerr << "New resolution: " << new_width << "x" << new_height << endl;
+ cerr << endl;
+ }
+
+ // Calculate new resolution for sending video by applying max-fs constraint.
+ void GetVideoResolutionWithMaxFs(int orig_width, int orig_height, int max_fs,
+ int *new_width, int *new_height)
+ {
+ int err = 0;
+
+ // Get pointer to VideoSessionConduit.
+ mozilla::SyncRunnable::DispatchToThread(gMainThread,
+ WrapRunnableNMRet(&mVideoSession,
+ &mozilla::VideoSessionConduit::Create));
+ if( !mVideoSession )
+ ASSERT_NE(mVideoSession, (void*)nullptr);
+
+ mozilla::EncodingConstraints constraints;
+ constraints.maxFs = max_fs;
+ // Configure send codecs on the conduit.
+ mozilla::VideoCodecConfig cinst1(120, "VP8", constraints);
+
+ err = mVideoSession->ConfigureSendMediaCodec(&cinst1);
+ ASSERT_EQ(mozilla::kMediaConduitNoError, err);
+ err = mVideoSession->StartTransmitting();
+ ASSERT_EQ(mozilla::kMediaConduitNoError, err);
+
+ // Send one frame.
+ MOZ_ASSERT(!(orig_width & 1));
+ MOZ_ASSERT(!(orig_height & 1));
+ int len = ((orig_width * orig_height) * 3 / 2);
+ uint8_t* frame = (uint8_t*) PR_MALLOC(len);
+
+ memset(frame, COLOR, len);
+ mVideoSession->SendVideoFrame((unsigned char*)frame,
+ len,
+ orig_width,
+ orig_height,
+ mozilla::kVideoI420,
+ 0);
+ PR_Free(frame);
+
+ // Get the new resolution as adjusted by the max-fs constraint.
+ *new_width = mVideoSession->SendingWidth();
+ *new_height = mVideoSession->SendingHeight();
+ }
+
+ void TestVideoConduitMaxFs()
+ {
+ int orig_width, orig_height, width, height, max_fs;
+
+ // No limitation.
+ cerr << "Test no max-fs limition" << endl;
+ orig_width = 640;
+ orig_height = 480;
+ max_fs = 0;
+ GetVideoResolutionWithMaxFs(orig_width, orig_height, max_fs, &width, &height);
+ DumpMaxFs(orig_width, orig_height, max_fs, width, height);
+ ASSERT_EQ(width, 640);
+ ASSERT_EQ(height, 480);
+
+ // VGA to QVGA.
+ cerr << "Test resizing from VGA to QVGA" << endl;
+ orig_width = 640;
+ orig_height = 480;
+ max_fs = 300;
+ GetVideoResolutionWithMaxFs(orig_width, orig_height, max_fs, &width, &height);
+ DumpMaxFs(orig_width, orig_height, max_fs, width, height);
+ ASSERT_EQ(width, 320);
+ ASSERT_EQ(height, 240);
+
+ // Extreme input resolution.
+ cerr << "Test extreme input resolution" << endl;
+ orig_width = 3072;
+ orig_height = 100;
+ max_fs = 300;
+ GetVideoResolutionWithMaxFs(orig_width, orig_height, max_fs, &width, &height);
+ DumpMaxFs(orig_width, orig_height, max_fs, width, height);
+ ASSERT_EQ(width, 768);
+ ASSERT_EQ(height, 26);
+
+ // Small max-fs.
+ cerr << "Test small max-fs (case 1)" << endl;
+ orig_width = 8;
+ orig_height = 32;
+ max_fs = 1;
+ GetVideoResolutionWithMaxFs(orig_width, orig_height, max_fs, &width, &height);
+ DumpMaxFs(orig_width, orig_height, max_fs, width, height);
+ ASSERT_EQ(width, 4);
+ ASSERT_EQ(height, 16);
+
+ // Small max-fs.
+ cerr << "Test small max-fs (case 2)" << endl;
+ orig_width = 4;
+ orig_height = 50;
+ max_fs = 1;
+ GetVideoResolutionWithMaxFs(orig_width, orig_height, max_fs, &width, &height);
+ DumpMaxFs(orig_width, orig_height, max_fs, width, height);
+ ASSERT_EQ(width, 2);
+ ASSERT_EQ(height, 16);
+
+ // Small max-fs.
+ cerr << "Test small max-fs (case 3)" << endl;
+ orig_width = 872;
+ orig_height = 136;
+ max_fs = 3;
+ GetVideoResolutionWithMaxFs(orig_width, orig_height, max_fs, &width, &height);
+ DumpMaxFs(orig_width, orig_height, max_fs, width, height);
+ ASSERT_EQ(width, 48);
+ ASSERT_EQ(height, 8);
+
+ // Small max-fs.
+ cerr << "Test small max-fs (case 4)" << endl;
+ orig_width = 160;
+ orig_height = 8;
+ max_fs = 5;
+ GetVideoResolutionWithMaxFs(orig_width, orig_height, max_fs, &width, &height);
+ DumpMaxFs(orig_width, orig_height, max_fs, width, height);
+ ASSERT_EQ(width, 80);
+ ASSERT_EQ(height, 4);
+
+ // Extremely small width and height(see bug 919979).
+ cerr << "Test with extremely small width and height" << endl;
+ orig_width = 2;
+ orig_height = 2;
+ max_fs = 5;
+ GetVideoResolutionWithMaxFs(orig_width, orig_height, max_fs, &width, &height);
+ DumpMaxFs(orig_width, orig_height, max_fs, width, height);
+ ASSERT_EQ(width, 2);
+ ASSERT_EQ(height, 2);
+
+ // Random values.
+ cerr << "Test with random values" << endl;
+ for (int i = 0; i < 30; i++) {
+ cerr << ".";
+ max_fs = rand() % 1000;
+ orig_width = ((rand() % 2000) & ~1) + 2;
+ orig_height = ((rand() % 2000) & ~1) + 2;
+
+ GetVideoResolutionWithMaxFs(orig_width, orig_height, max_fs,
+ &width, &height);
+ if (max_fs > 0 &&
+ ceil(width / 16.) * ceil(height / 16.) > max_fs) {
+ DumpMaxFs(orig_width, orig_height, max_fs, width, height);
+ ADD_FAILURE();
+ }
+ if ((width & 1) || (height & 1)) {
+ DumpMaxFs(orig_width, orig_height, max_fs, width, height);
+ ADD_FAILURE();
+ }
+ }
+ cerr << endl;
+ }
+
+ void SetGmpCodecs() {
+ mExternalEncoder = mozilla::GmpVideoCodec::CreateEncoder();
+ mExternalDecoder = mozilla::GmpVideoCodec::CreateDecoder();
+ mozilla::EncodingConstraints constraints;
+ mozilla::VideoCodecConfig config(124, "H264", constraints);
+ mVideoSession->SetExternalSendCodec(&config, mExternalEncoder);
+ mVideoSession2->SetExternalRecvCodec(&config, mExternalDecoder);
+ }
+
+ private:
+ //Audio Conduit Test Objects
+ RefPtr<mozilla::AudioSessionConduit> mAudioSession;
+ RefPtr<mozilla::AudioSessionConduit> mAudioSession2;
+ RefPtr<mozilla::TransportInterface> mAudioTransport;
+ AudioSendAndReceive audioTester;
+
+ //Video Conduit Test Objects
+ RefPtr<mozilla::VideoSessionConduit> mVideoSession;
+ RefPtr<mozilla::VideoSessionConduit> mVideoSession2;
+ RefPtr<mozilla::VideoRenderer> mVideoRenderer;
+ RefPtr<mozilla::TransportInterface> mVideoTransport;
+ VideoSendAndReceive videoTester;
+
+ mozilla::VideoEncoder* mExternalEncoder;
+ mozilla::VideoDecoder* mExternalDecoder;
+
+ std::string fileToPlay;
+ std::string fileToRecord;
+ std::string iAudiofilename;
+ std::string oAudiofilename;
+};
+
+
+// Test 1: Test Dummy External Xport
+TEST_F(TransportConduitTest, TestDummyAudioWithTransport) {
+ TestDummyAudioAndTransport();
+}
+
+// Test 2: Test Dummy External Xport
+TEST_F(TransportConduitTest, TestDummyVideoWithTransport) {
+ TestDummyVideoAndTransport();
+ }
+
+TEST_F(TransportConduitTest, TestVideoConduitExternalCodec) {
+ TestDummyVideoAndTransport(false);
+}
+
+TEST_F(TransportConduitTest, TestVideoConduitCodecAPI) {
+ TestVideoConduitCodecAPI();
+ }
+
+TEST_F(TransportConduitTest, TestVideoConduitMaxFs) {
+ TestVideoConduitMaxFs();
+ }
+
+} // end namespace
+
// Process exit code; intended to carry the gtest outcome back to main().
static int test_result;
// NOTE(review): never read or written in this file — looks vestigial;
// confirm before removing.
bool test_finished = false;
+
+
+
+// This exists to send as an event to trigger shutdown.
// This exists to send as an event to trigger shutdown.
// Dispatched to the main thread by gtest_main() once RUN_ALL_TESTS()
// finishes; setting the flag ends main()'s event-pump loop.
static void tests_complete() {
  gTestsComplete = true;
}
+
+// The GTest thread runs this instead of the main thread so it can
+// do things like ASSERT_TRUE_WAIT which you could not do on the main thread.
+static int gtest_main(int argc, char **argv) {
+ MOZ_ASSERT(!NS_IsMainThread());
+
+ ::testing::InitGoogleTest(&argc, argv);
+
+ int result = RUN_ALL_TESTS();
+
+ // Set the global shutdown flag and tickle the main thread
+ // The main thread did not go through Init() so calling Shutdown()
+ // on it will not work.
+ gMainThread->Dispatch(mozilla::WrapRunnableNM(tests_complete), NS_DISPATCH_SYNC);
+
+ return result;
+}
+
+int main(int argc, char **argv)
+{
+ // This test can cause intermittent oranges on the builders
+ CHECK_ENVIRONMENT_FLAG("MOZ_WEBRTC_MEDIACONDUIT_TESTS")
+
+ test_utils = new MtransportTestUtils();
+
+ // Set the main thread global which is this thread.
+ nsIThread *thread;
+ NS_GetMainThread(&thread);
+ gMainThread = thread;
+
+ // Now create the GTest thread and run all of the tests on it
+ // When it is complete it will set gTestsComplete
+ NS_NewNamedThread("gtest_thread", &thread);
+ gGtestThread = thread;
+
+ int result;
+ gGtestThread->Dispatch(
+ mozilla::WrapRunnableNMRet(&result, gtest_main, argc, argv), NS_DISPATCH_NORMAL);
+
+ // Here we handle the event queue for dispatches to the main thread
+ // When the GTest thread is complete it will send one more dispatch
+ // with gTestsComplete == true.
+ while (!gTestsComplete && NS_ProcessNextEvent());
+
+ gGtestThread->Shutdown();
+
+ delete test_utils;
+ return test_result;
+}
+
+
+
diff --git a/media/webrtc/signaling/test/mediapipeline_unittest.cpp b/media/webrtc/signaling/test/mediapipeline_unittest.cpp
new file mode 100644
index 000000000..33518218b
--- /dev/null
+++ b/media/webrtc/signaling/test/mediapipeline_unittest.cpp
@@ -0,0 +1,720 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Original author: ekr@rtfm.com
+
+#include <iostream>
+
+#include "sigslot.h"
+
+#include "logging.h"
+#include "nsThreadUtils.h"
+#include "nsXPCOM.h"
+#include "nss.h"
+#include "ssl.h"
+#include "sslproto.h"
+
+#include "dtlsidentity.h"
+#include "mozilla/RefPtr.h"
+#include "FakeMediaStreams.h"
+#include "FakeMediaStreamsImpl.h"
+#include "FakeLogging.h"
+#include "MediaConduitErrors.h"
+#include "MediaConduitInterface.h"
+#include "MediaPipeline.h"
+#include "MediaPipelineFilter.h"
+#include "runnable_utils.h"
+#include "transportflow.h"
+#include "transportlayerloopback.h"
+#include "transportlayerdtls.h"
+#include "mozilla/SyncRunnable.h"
+
+
+#include "mtransport_test_utils.h"
+#include "runnable_utils.h"
+
+#include "webrtc/modules/interface/module_common_types.h"
+
+#include "FakeIPC.h"
+#include "FakeIPC.cpp"
+
+#define GTEST_HAS_RTTI 0
+#include "gtest/gtest.h"
+#include "gtest_utils.h"
+
+#include "TestHarness.h"
+
+using namespace mozilla;
MOZ_MTLOG_MODULE("mediapipeline")

// Shared transport test harness; allocated/freed by this binary's main()
// (outside this excerpt) and used for STS-thread dispatch throughout.
MtransportTestUtils *test_utils;
+
+namespace {
+
// Bundles one transport stack for a test endpoint: a TransportFlow layered
// over a loopback layer and a DTLS layer. Ownership is delicate: loopback_
// and dtls_ are raw pointers owned by this class only until PushLayers()
// succeeds, after which flow_ owns them (see FreeLayers()).
class TransportInfo {
 public:
  TransportInfo() :
    flow_(nullptr),
    loopback_(nullptr),
    dtls_(nullptr) {}

  // Builds both endpoints (client/server DTLS roles) and cross-connects
  // their loopback layers.
  static void InitAndConnect(TransportInfo &client, TransportInfo &server) {
    client.Init(true);
    server.Init(false);
    client.PushLayers();
    server.PushLayers();
    client.Connect(&server);
    server.Connect(&client);
  }

  // Creates the flow/loopback/DTLS objects and configures DTLS:
  // SRTP cipher, a freshly generated identity, the requested role, and
  // permissive certificate verification (test-only).
  void Init(bool client) {
    nsresult res;

    flow_ = new TransportFlow();
    loopback_ = new TransportLayerLoopback();
    dtls_ = new TransportLayerDtls();

    res = loopback_->Init();
    if (res != NS_OK) {
      FreeLayers();
    }
    ASSERT_EQ((nsresult)NS_OK, res);

    std::vector<uint16_t> ciphers;
    ciphers.push_back(SRTP_AES128_CM_HMAC_SHA1_80);
    dtls_->SetSrtpCiphers(ciphers);
    dtls_->SetIdentity(DtlsIdentity::Generate());
    dtls_->SetRole(client ? TransportLayerDtls::CLIENT :
      TransportLayerDtls::SERVER);
    dtls_->SetVerificationAllowAll();
  }

  // Transfers the loopback and DTLS layers into flow_ (which takes
  // ownership on success); frees them on failure.
  void PushLayers() {
    nsresult res;

    nsAutoPtr<std::queue<TransportLayer *> > layers(
      new std::queue<TransportLayer *>);
    layers->push(loopback_);
    layers->push(dtls_);
    res = flow_->PushLayers(layers);
    if (res != NS_OK) {
      FreeLayers();
    }
    ASSERT_EQ((nsresult)NS_OK, res);
  }

  // Wires this endpoint's loopback layer to the peer's.
  void Connect(TransportInfo* peer) {
    MOZ_ASSERT(loopback_);
    MOZ_ASSERT(peer->loopback_);

    loopback_->Connect(peer->loopback_);
  }

  // Free the memory allocated at the beginning of Init
  // if failure occurs before layers setup.
  void FreeLayers() {
    delete loopback_;
    loopback_ = nullptr;
    delete dtls_;
    dtls_ = nullptr;
  }

  // Disconnects the loopback and drops all references. After PushLayers()
  // succeeded, flow_ owns the layers, so only the pointers are cleared.
  void Shutdown() {
    if (loopback_) {
      loopback_->Disconnect();
    }
    loopback_ = nullptr;
    dtls_ = nullptr;
    flow_ = nullptr;
  }

  RefPtr<TransportFlow> flow_;     // owning; also owns layers once pushed
  TransportLayerLoopback *loopback_;  // non-owning after PushLayers()
  TransportLayerDtls *dtls_;          // non-owning after PushLayers()
};
+
+class TestAgent {
+ public:
+ TestAgent() :
+ audio_config_(109, "opus", 48000, 960, 2, 64000, false),
+ audio_conduit_(mozilla::AudioSessionConduit::Create()),
+ audio_(),
+ audio_pipeline_() {
+ }
+
+ static void ConnectRtp(TestAgent *client, TestAgent *server) {
+ TransportInfo::InitAndConnect(client->audio_rtp_transport_,
+ server->audio_rtp_transport_);
+ }
+
+ static void ConnectRtcp(TestAgent *client, TestAgent *server) {
+ TransportInfo::InitAndConnect(client->audio_rtcp_transport_,
+ server->audio_rtcp_transport_);
+ }
+
+ static void ConnectBundle(TestAgent *client, TestAgent *server) {
+ TransportInfo::InitAndConnect(client->bundle_transport_,
+ server->bundle_transport_);
+ }
+
+ virtual void CreatePipelines_s(bool aIsRtcpMux) = 0;
+
+ void Start() {
+ nsresult ret;
+
+ MOZ_MTLOG(ML_DEBUG, "Starting");
+
+ mozilla::SyncRunnable::DispatchToThread(
+ test_utils->sts_target(),
+ WrapRunnableRet(&ret, audio_->GetStream(), &Fake_MediaStream::Start));
+
+ ASSERT_TRUE(NS_SUCCEEDED(ret));
+ }
+
+ void StopInt() {
+ audio_->GetStream()->Stop();
+ }
+
+ void Stop() {
+ MOZ_MTLOG(ML_DEBUG, "Stopping");
+
+ if (audio_pipeline_)
+ audio_pipeline_->ShutdownMedia_m();
+
+ mozilla::SyncRunnable::DispatchToThread(
+ test_utils->sts_target(),
+ WrapRunnable(this, &TestAgent::StopInt));
+ }
+
+ void Shutdown_s() {
+ audio_rtp_transport_.Shutdown();
+ audio_rtcp_transport_.Shutdown();
+ bundle_transport_.Shutdown();
+ if (audio_pipeline_)
+ audio_pipeline_->DetachTransport_s();
+ }
+
+ void Shutdown() {
+ if (audio_pipeline_)
+ audio_pipeline_->ShutdownMedia_m();
+
+ mozilla::SyncRunnable::DispatchToThread(
+ test_utils->sts_target(),
+ WrapRunnable(this, &TestAgent::Shutdown_s));
+ }
+
+ uint32_t GetRemoteSSRC() {
+ uint32_t res = 0;
+ audio_conduit_->GetRemoteSSRC(&res);
+ return res;
+ }
+
+ uint32_t GetLocalSSRC() {
+ uint32_t res = 0;
+ audio_conduit_->GetLocalSSRC(&res);
+ return res;
+ }
+
+ int GetAudioRtpCountSent() {
+ return audio_pipeline_->rtp_packets_sent();
+ }
+
+ int GetAudioRtpCountReceived() {
+ return audio_pipeline_->rtp_packets_received();
+ }
+
+ int GetAudioRtcpCountSent() {
+ return audio_pipeline_->rtcp_packets_sent();
+ }
+
+ int GetAudioRtcpCountReceived() {
+ return audio_pipeline_->rtcp_packets_received();
+ }
+
+ protected:
+ mozilla::AudioCodecConfig audio_config_;
+ RefPtr<mozilla::MediaSessionConduit> audio_conduit_;
+ RefPtr<DOMMediaStream> audio_;
+ // TODO(bcampen@mozilla.com): Right now this does not let us test RTCP in
+ // both directions; only the sender's RTCP is sent, but the receiver should
+ // be sending it too.
+ RefPtr<mozilla::MediaPipeline> audio_pipeline_;
+ TransportInfo audio_rtp_transport_;
+ TransportInfo audio_rtcp_transport_;
+ TransportInfo bundle_transport_;
+};
+
// Sending endpoint: builds a MediaPipelineTransmit over a fake audio
// source stream, optionally routed through the bundle transport.
class TestAgentSend : public TestAgent {
 public:
  TestAgentSend() : use_bundle_(false) {}

  // Build the transmit pipeline. Runs on the STS thread (dispatched by the
  // fixture). When aIsRtcpMux is true the RTCP transport must not exist.
  virtual void CreatePipelines_s(bool aIsRtcpMux) {
    audio_ = new Fake_DOMMediaStream(new Fake_AudioStreamSource());
    audio_->SetHintContents(Fake_DOMMediaStream::HINT_CONTENTS_AUDIO);

    nsTArray<RefPtr<Fake_MediaStreamTrack>> tracks;
    audio_->GetAudioTracks(tracks);
    ASSERT_EQ(1U, tracks.Length());

    mozilla::MediaConduitErrorCode err =
      static_cast<mozilla::AudioSessionConduit *>(audio_conduit_.get())->
      ConfigureSendMediaCodec(&audio_config_);
    EXPECT_EQ(mozilla::kMediaConduitNoError, err);

    std::string test_pc("PC");

    if (aIsRtcpMux) {
      ASSERT_FALSE(audio_rtcp_transport_.flow_);
    }

    RefPtr<TransportFlow> rtp(audio_rtp_transport_.flow_);
    RefPtr<TransportFlow> rtcp(audio_rtcp_transport_.flow_);

    // Bundling sends everything over the bundle flow with muxed RTCP.
    if (use_bundle_) {
      rtp = bundle_transport_.flow_;
      rtcp = nullptr;
    }

    audio_pipeline_ = new mozilla::MediaPipelineTransmit(
        test_pc,
        nullptr,
        test_utils->sts_target(),
        tracks[0],
        "audio_track_fake_uuid",
        1,
        audio_conduit_,
        rtp,
        rtcp,
        nsAutoPtr<MediaPipelineFilter>());

    audio_pipeline_->Init();
  }

  // Must be called before CreatePipelines_s to take effect.
  void SetUsingBundle(bool use_bundle) {
    use_bundle_ = use_bundle;
  }

 private:
  bool use_bundle_;
};
+
+
// Receiving endpoint: builds a MediaPipelineReceiveAudio feeding a fake
// source stream, with an optional bundle filter.
class TestAgentReceive : public TestAgent {
 public:
  // Build the receive pipeline. Runs on the STS thread (dispatched by the
  // fixture). When aIsRtcpMux is true the RTCP transport must not exist.
  virtual void CreatePipelines_s(bool aIsRtcpMux) {
    // NOTE(review): raw 'new'; presumably Fake_DOMMediaStream takes
    // ownership of the source stream — confirm against FakeMediaStreams.
    mozilla::SourceMediaStream *audio = new Fake_SourceMediaStream();
    audio->SetPullEnabled(true);

    mozilla::AudioSegment* segment= new mozilla::AudioSegment();
    audio->AddAudioTrack(0, 100, 0, segment);
    audio->AdvanceKnownTracksTime(mozilla::STREAM_TIME_MAX);

    audio_ = new Fake_DOMMediaStream(audio);

    std::vector<mozilla::AudioCodecConfig *> codecs;
    codecs.push_back(&audio_config_);

    mozilla::MediaConduitErrorCode err =
      static_cast<mozilla::AudioSessionConduit *>(audio_conduit_.get())->
      ConfigureRecvMediaCodecs(codecs);
    EXPECT_EQ(mozilla::kMediaConduitNoError, err);

    std::string test_pc("PC");

    if (aIsRtcpMux) {
      ASSERT_FALSE(audio_rtcp_transport_.flow_);
    }

    audio_pipeline_ = new mozilla::MediaPipelineReceiveAudio(
        test_pc,
        nullptr,
        test_utils->sts_target(),
        audio_->GetStream()->AsSourceStream(), "audio_track_fake_uuid", 1, 1,
        static_cast<mozilla::AudioSessionConduit *>(audio_conduit_.get()),
        audio_rtp_transport_.flow_,
        audio_rtcp_transport_.flow_,
        bundle_filter_);

    audio_pipeline_->Init();
  }

  // Install the initial bundle filter (used at pipeline creation).
  void SetBundleFilter(nsAutoPtr<MediaPipelineFilter> filter) {
    bundle_filter_ = filter;
  }

  // Replace the pipeline's filter; must run on the STS thread.
  void UpdateFilter_s(
      nsAutoPtr<MediaPipelineFilter> filter) {
    audio_pipeline_->UpdateTransport_s(1,
                                       audio_rtp_transport_.flow_,
                                       audio_rtcp_transport_.flow_,
                                       filter);
  }

 private:
  nsAutoPtr<MediaPipelineFilter> bundle_filter_;
};
+
+
+class MediaPipelineTest : public ::testing::Test {
+ public:
+ ~MediaPipelineTest() {
+ p1_.Stop();
+ p2_.Stop();
+ p1_.Shutdown();
+ p2_.Shutdown();
+ }
+
+ // Setup transport.
+ void InitTransports(bool aIsRtcpMux) {
+ // RTP, p1_ is server, p2_ is client
+ mozilla::SyncRunnable::DispatchToThread(
+ test_utils->sts_target(),
+ WrapRunnableNM(&TestAgent::ConnectRtp, &p2_, &p1_));
+
+ // Create RTCP flows separately if we are not muxing them.
+ if(!aIsRtcpMux) {
+ // RTCP, p1_ is server, p2_ is client
+ mozilla::SyncRunnable::DispatchToThread(
+ test_utils->sts_target(),
+ WrapRunnableNM(&TestAgent::ConnectRtcp, &p2_, &p1_));
+ }
+
+ // BUNDLE, p1_ is server, p2_ is client
+ mozilla::SyncRunnable::DispatchToThread(
+ test_utils->sts_target(),
+ WrapRunnableNM(&TestAgent::ConnectBundle, &p2_, &p1_));
+ }
+
+ // Verify RTP and RTCP
+ void TestAudioSend(bool aIsRtcpMux,
+ nsAutoPtr<MediaPipelineFilter> initialFilter =
+ nsAutoPtr<MediaPipelineFilter>(nullptr),
+ nsAutoPtr<MediaPipelineFilter> refinedFilter =
+ nsAutoPtr<MediaPipelineFilter>(nullptr),
+ unsigned int ms_until_filter_update = 500,
+ unsigned int ms_of_traffic_after_answer = 10000) {
+
+ bool bundle = !!(initialFilter);
+ // We do not support testing bundle without rtcp mux, since that doesn't
+ // make any sense.
+ ASSERT_FALSE(!aIsRtcpMux && bundle);
+
+ p2_.SetBundleFilter(initialFilter);
+
+ // Setup transport flows
+ InitTransports(aIsRtcpMux);
+
+ NS_DispatchToMainThread(
+ WrapRunnable(&p1_, &TestAgent::CreatePipelines_s, aIsRtcpMux),
+ NS_DISPATCH_SYNC);
+
+ mozilla::SyncRunnable::DispatchToThread(
+ test_utils->sts_target(),
+ WrapRunnable(&p2_, &TestAgent::CreatePipelines_s, aIsRtcpMux));
+
+ p2_.Start();
+ p1_.Start();
+
+ if (bundle) {
+ PR_Sleep(ms_until_filter_update);
+
+ // Leaving refinedFilter not set implies we want to just update with
+ // the other side's SSRC
+ if (!refinedFilter) {
+ refinedFilter = new MediaPipelineFilter;
+ // Might not be safe, strictly speaking.
+ refinedFilter->AddRemoteSSRC(p1_.GetLocalSSRC());
+ }
+
+ mozilla::SyncRunnable::DispatchToThread(
+ test_utils->sts_target(),
+ WrapRunnable(&p2_,
+ &TestAgentReceive::UpdateFilter_s,
+ refinedFilter));
+ }
+
+ // wait for some RTP/RTCP tx and rx to happen
+ PR_Sleep(ms_of_traffic_after_answer);
+
+ p1_.Stop();
+ p2_.Stop();
+
+ // wait for any packets in flight to arrive
+ PR_Sleep(100);
+
+ p1_.Shutdown();
+ p2_.Shutdown();
+
+ if (!bundle) {
+ // If we are filtering, allow the test-case to do this checking.
+ ASSERT_GE(p1_.GetAudioRtpCountSent(), 40);
+ ASSERT_EQ(p1_.GetAudioRtpCountReceived(), p2_.GetAudioRtpCountSent());
+ ASSERT_EQ(p1_.GetAudioRtpCountSent(), p2_.GetAudioRtpCountReceived());
+
+ // Calling ShutdownMedia_m on both pipelines does not stop the flow of
+ // RTCP. So, we might be off by one here.
+ ASSERT_LE(p2_.GetAudioRtcpCountReceived(), p1_.GetAudioRtcpCountSent());
+ ASSERT_GE(p2_.GetAudioRtcpCountReceived() + 1, p1_.GetAudioRtcpCountSent());
+ }
+
+ }
+
+  // Convenience wrapper around TestAudioSend for the BUNDLE case; always
+  // runs with rtcp-mux enabled (the literal 'true' below is aIsRtcpMux).
+  // NOTE(review): bundle_accepted is not consulted anywhere in this body;
+  // the filters alone drive the behavior — confirm this is intentional.
+  void TestAudioReceiverBundle(bool bundle_accepted,
+                               nsAutoPtr<MediaPipelineFilter> initialFilter,
+                               nsAutoPtr<MediaPipelineFilter> refinedFilter =
+                                   nsAutoPtr<MediaPipelineFilter>(nullptr),
+                               unsigned int ms_until_answer = 500,
+                               unsigned int ms_of_traffic_after_answer = 10000) {
+    TestAudioSend(true,
+                  initialFilter,
+                  refinedFilter,
+                  ms_until_answer,
+                  ms_of_traffic_after_answer);
+  }
+protected:
+ TestAgentSend p1_;
+ TestAgentReceive p2_;
+};
+
+class MediaPipelineFilterTest : public ::testing::Test {
+ public:
+  // Builds an RTPHeader with the given SSRC and payload type and runs it
+  // through |filter|; returns whether the packet would be let through.
+  bool Filter(MediaPipelineFilter& filter,
+              int32_t correlator,
+              uint32_t ssrc,
+              uint8_t payload_type) {
+
+    webrtc::RTPHeader header;
+    header.ssrc = ssrc;
+    header.payloadType = payload_type;
+    return filter.Filter(header, correlator);
+  }
+};
+
+TEST_F(MediaPipelineFilterTest, TestConstruct) {
+ MediaPipelineFilter filter;
+}
+
+TEST_F(MediaPipelineFilterTest, TestDefault) {
+ MediaPipelineFilter filter;
+ ASSERT_FALSE(Filter(filter, 0, 233, 110));
+}
+
+TEST_F(MediaPipelineFilterTest, TestSSRCFilter) {
+ MediaPipelineFilter filter;
+ filter.AddRemoteSSRC(555);
+ ASSERT_TRUE(Filter(filter, 0, 555, 110));
+ ASSERT_FALSE(Filter(filter, 0, 556, 110));
+}
+
+#define SSRC(ssrc) \
+ ((ssrc >> 24) & 0xFF), \
+ ((ssrc >> 16) & 0xFF), \
+ ((ssrc >> 8 ) & 0xFF), \
+ (ssrc & 0xFF)
+
+#define REPORT_FRAGMENT(ssrc) \
+ SSRC(ssrc), \
+ 0,0,0,0, \
+ 0,0,0,0, \
+ 0,0,0,0, \
+ 0,0,0,0, \
+ 0,0,0,0
+
+#define RTCP_TYPEINFO(num_rrs, type, size) \
+ 0x80 + num_rrs, type, 0, size
+
+const unsigned char rtcp_sr_s16[] = {
+ // zero rrs, size 6 words
+ RTCP_TYPEINFO(0, MediaPipelineFilter::SENDER_REPORT_T, 6),
+ REPORT_FRAGMENT(16)
+};
+
+const unsigned char rtcp_sr_s16_r17[] = {
+ // one rr, size 12 words
+ RTCP_TYPEINFO(1, MediaPipelineFilter::SENDER_REPORT_T, 12),
+ REPORT_FRAGMENT(16),
+ REPORT_FRAGMENT(17)
+};
+
+const unsigned char unknown_type[] = {
+ RTCP_TYPEINFO(1, 222, 0)
+};
+
+TEST_F(MediaPipelineFilterTest, TestEmptyFilterReport0) {
+ MediaPipelineFilter filter;
+ ASSERT_FALSE(filter.FilterSenderReport(rtcp_sr_s16, sizeof(rtcp_sr_s16)));
+}
+
+TEST_F(MediaPipelineFilterTest, TestFilterReport0) {
+ MediaPipelineFilter filter;
+ filter.AddRemoteSSRC(16);
+ ASSERT_TRUE(filter.FilterSenderReport(rtcp_sr_s16, sizeof(rtcp_sr_s16)));
+}
+
+TEST_F(MediaPipelineFilterTest, TestFilterReport0PTTruncated) {
+ MediaPipelineFilter filter;
+ filter.AddRemoteSSRC(16);
+ const unsigned char data[] = {0x80};
+ ASSERT_FALSE(filter.FilterSenderReport(data, sizeof(data)));
+}
+
+TEST_F(MediaPipelineFilterTest, TestFilterReport0CountTruncated) {
+ MediaPipelineFilter filter;
+ filter.AddRemoteSSRC(16);
+ const unsigned char data[] = {};
+ ASSERT_FALSE(filter.FilterSenderReport(data, sizeof(data)));
+}
+
+TEST_F(MediaPipelineFilterTest, TestFilterReport1SSRCTruncated) {
+ MediaPipelineFilter filter;
+ filter.AddRemoteSSRC(16);
+ const unsigned char sr[] = {
+ RTCP_TYPEINFO(1, MediaPipelineFilter::SENDER_REPORT_T, 12),
+ REPORT_FRAGMENT(16),
+ 0,0,0
+ };
+ ASSERT_TRUE(filter.FilterSenderReport(sr, sizeof(sr)));
+}
+
+TEST_F(MediaPipelineFilterTest, TestFilterReport1BigSSRC) {
+ MediaPipelineFilter filter;
+ filter.AddRemoteSSRC(0x01020304);
+ const unsigned char sr[] = {
+ RTCP_TYPEINFO(1, MediaPipelineFilter::SENDER_REPORT_T, 12),
+ SSRC(0x01020304),
+ REPORT_FRAGMENT(0x11121314)
+ };
+ ASSERT_TRUE(filter.FilterSenderReport(sr, sizeof(sr)));
+}
+
+TEST_F(MediaPipelineFilterTest, TestFilterReportMatch) {
+ MediaPipelineFilter filter;
+ filter.AddRemoteSSRC(16);
+ ASSERT_TRUE(filter.FilterSenderReport(rtcp_sr_s16_r17,
+ sizeof(rtcp_sr_s16_r17)));
+}
+
+TEST_F(MediaPipelineFilterTest, TestFilterReportNoMatch) {
+ MediaPipelineFilter filter;
+ filter.AddRemoteSSRC(17);
+ ASSERT_FALSE(filter.FilterSenderReport(rtcp_sr_s16_r17,
+ sizeof(rtcp_sr_s16_r17)));
+}
+
+TEST_F(MediaPipelineFilterTest, TestFilterUnknownRTCPType) {
+ MediaPipelineFilter filter;
+ ASSERT_FALSE(filter.FilterSenderReport(unknown_type, sizeof(unknown_type)));
+}
+
+TEST_F(MediaPipelineFilterTest, TestCorrelatorFilter) {
+ MediaPipelineFilter filter;
+ filter.SetCorrelator(7777);
+ ASSERT_TRUE(Filter(filter, 7777, 16, 110));
+ ASSERT_FALSE(Filter(filter, 7778, 17, 110));
+ // This should also have resulted in the SSRC 16 being added to the filter
+ ASSERT_TRUE(Filter(filter, 0, 16, 110));
+ ASSERT_FALSE(Filter(filter, 0, 17, 110));
+
+ // rtcp_sr_s16 has 16 as an SSRC
+ ASSERT_TRUE(filter.FilterSenderReport(rtcp_sr_s16, sizeof(rtcp_sr_s16)));
+}
+
+TEST_F(MediaPipelineFilterTest, TestPayloadTypeFilter) {
+ MediaPipelineFilter filter;
+ filter.AddUniquePT(110);
+ ASSERT_TRUE(Filter(filter, 0, 555, 110));
+ ASSERT_FALSE(Filter(filter, 0, 556, 111));
+}
+
+TEST_F(MediaPipelineFilterTest, TestPayloadTypeFilterSSRCUpdate) {
+ MediaPipelineFilter filter;
+ filter.AddUniquePT(110);
+ ASSERT_TRUE(Filter(filter, 0, 16, 110));
+
+ // rtcp_sr_s16 has 16 as an SSRC
+ ASSERT_TRUE(filter.FilterSenderReport(rtcp_sr_s16, sizeof(rtcp_sr_s16)));
+}
+
+TEST_F(MediaPipelineFilterTest, TestSSRCMovedWithCorrelator) {
+ MediaPipelineFilter filter;
+ filter.SetCorrelator(7777);
+ ASSERT_TRUE(Filter(filter, 7777, 555, 110));
+ ASSERT_TRUE(Filter(filter, 0, 555, 110));
+ ASSERT_FALSE(Filter(filter, 7778, 555, 110));
+ ASSERT_FALSE(Filter(filter, 0, 555, 110));
+}
+
+TEST_F(MediaPipelineFilterTest, TestRemoteSDPNoSSRCs) {
+ // If the remote SDP doesn't have SSRCs, right now this is a no-op and
+ // there is no point of even incorporating a filter, but we make the
+ // behavior consistent to avoid confusion.
+ MediaPipelineFilter filter;
+ filter.SetCorrelator(7777);
+ filter.AddUniquePT(111);
+ ASSERT_TRUE(Filter(filter, 7777, 555, 110));
+
+ MediaPipelineFilter filter2;
+
+ filter.Update(filter2);
+
+ // Ensure that the old SSRC still works.
+ ASSERT_TRUE(Filter(filter, 0, 555, 110));
+}
+
+TEST_F(MediaPipelineTest, DISABLED_TestAudioSendNoMux) {
+ TestAudioSend(false);
+}
+
+TEST_F(MediaPipelineTest, DISABLED_TestAudioSendMux) {
+ TestAudioSend(true);
+}
+
+TEST_F(MediaPipelineTest, TestAudioSendBundle) {
+ nsAutoPtr<MediaPipelineFilter> filter(new MediaPipelineFilter);
+ // These durations have to be _extremely_ long to have any assurance that
+ // some RTCP will be sent at all. This is because the first RTCP packet
+ // is sometimes sent before the transports are ready, which causes it to
+ // be dropped.
+ TestAudioReceiverBundle(true,
+ filter,
+ // We do not specify the filter for the remote description, so it will be
+ // set to something sane after a short time.
+ nsAutoPtr<MediaPipelineFilter>(),
+ 10000,
+ 10000);
+
+ // Some packets should have been dropped, but not all
+ ASSERT_GT(p1_.GetAudioRtpCountSent(), p2_.GetAudioRtpCountReceived());
+ ASSERT_GT(p2_.GetAudioRtpCountReceived(), 40);
+ ASSERT_GT(p1_.GetAudioRtcpCountSent(), 1);
+ ASSERT_GT(p1_.GetAudioRtcpCountSent(), p2_.GetAudioRtcpCountReceived());
+ ASSERT_GT(p2_.GetAudioRtcpCountReceived(), 0);
+}
+
+TEST_F(MediaPipelineTest, TestAudioSendEmptyBundleFilter) {
+ nsAutoPtr<MediaPipelineFilter> filter(new MediaPipelineFilter);
+ nsAutoPtr<MediaPipelineFilter> bad_answer_filter(new MediaPipelineFilter);
+ TestAudioReceiverBundle(true, filter, bad_answer_filter);
+ // Filter is empty, so should drop everything.
+ ASSERT_EQ(0, p2_.GetAudioRtpCountReceived());
+}
+
+} // end namespace
+
+
+int main(int argc, char **argv) {
+  // Scoped XPCOM + NSS initialization for the whole test binary.
+  // NOTE(review): test_utils is created before NSS is initialized; the
+  // transport helpers appear not to need NSS at construction — confirm.
+  ScopedXPCOM xpcom("mediapipeline_unittest");
+  test_utils = new MtransportTestUtils();
+  // Start the tests
+  NSS_NoDB_Init(nullptr);
+  NSS_SetDomesticPolicy();
+  ::testing::InitGoogleTest(&argc, argv);
+
+  int rv = RUN_ALL_TESTS();
+  delete test_utils;
+  return rv;
+}
+
+
+
diff --git a/media/webrtc/signaling/test/moz.build b/media/webrtc/signaling/test/moz.build
new file mode 100644
index 000000000..4d8704de4
--- /dev/null
+++ b/media/webrtc/signaling/test/moz.build
@@ -0,0 +1,33 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# TODO: bug 1172551 - get these tests working on iOS
+if CONFIG['OS_TARGET'] != 'WINNT' and CONFIG['MOZ_WIDGET_TOOLKIT'] != 'gonk' and CONFIG['MOZ_WIDGET_TOOLKIT'] != 'uikit':
+ GeckoCppUnitTests([
+ 'jsep_session_unittest',
+ 'jsep_track_unittest',
+ 'mediaconduit_unittests',
+ 'sdp_file_parser',
+ 'sdp_unittests',
+ 'signaling_unittests',
+ ])
+
+include('/ipc/chromium/chromium-config.mozbuild')
+include('common.build')
+
+USE_LIBS += [
+ '/media/webrtc/signalingtest/signaling_ecc/ecc',
+ 'mtransport_s',
+]
+
+if CONFIG['GNU_CXX']:
+ CXXFLAGS += ['-Wno-error=shadow']
+
+if CONFIG['_MSC_VER']:
+ # This is intended as a temporary workaround to enable warning free building
+ # with VS2015.
+ # reinterpret_cast': conversion from 'DWORD' to 'HANDLE' of greater size
+ CXXFLAGS += ['-wd4312']
diff --git a/media/webrtc/signaling/test/sdp_file_parser.cpp b/media/webrtc/signaling/test/sdp_file_parser.cpp
new file mode 100644
index 000000000..3fb0c2f1c
--- /dev/null
+++ b/media/webrtc/signaling/test/sdp_file_parser.cpp
@@ -0,0 +1,85 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <string>
+#include <iostream>
+#include <fstream>
+
+#define GTEST_HAS_RTTI 0
+#include "gtest/gtest.h"
+#include "gtest_utils.h"
+
+// without this include linking fails
+#include "FakeMediaStreamsImpl.h"
+#include "FakeLogging.h"
+
+#include "signaling/src/sdp/SipccSdpParser.h"
+
+#include "FakeIPC.h"
+#include "FakeIPC.cpp"
+
+namespace mozilla {
+
+// Path of the SDP input file; overridable via argv[1] in main().
+const std::string kDefaultFilename("/tmp/sdp.bin");
+std::string filename(kDefaultFilename);
+
+// Fixture wrapping SipccSdpParser: parses an SDP string into mSdp and can
+// serialize it back for visual inspection.
+class SdpParseTest : public ::testing::Test
+{
+  public:
+    SdpParseTest() {}
+
+    // Parse |sdp|; on failure mSdp stays null.
+    void ParseSdp(const std::string &sdp) {
+      mSdp = mParser.Parse(sdp);
+    }
+
+    // Print the round-tripped SDP (no-op if parsing failed).
+    void SerializeSdp() {
+      if (mSdp) {
+        mSdp->Serialize(os);
+        std::cout << "Serialized SDP:" << std::endl <<
+                     os.str() << std::endl;
+      }
+    }
+
+    SipccSdpParser mParser;
+    mozilla::UniquePtr<Sdp> mSdp;
+    std::stringstream os;
+}; // class SdpParseTest
+
+TEST_F(SdpParseTest, parseSdpFromFile)
+{
+  std::ifstream file(filename.c_str(),
+                     std::ios::in|std::ios::binary|std::ios::ate);
+  ASSERT_TRUE(file.is_open());
+  std::streampos size = file.tellg();
+  // Read the whole file into an owned buffer.  std::string releases the
+  // memory automatically even if a later assertion aborts the test,
+  // unlike the raw new[]/delete[] pair this replaces.
+  std::string memblock(static_cast<size_t>(size), '\0');
+  file.seekg(0, std::ios::beg);
+  file.read(&memblock[0], size);
+  file.close();
+  std::cout << "Read file " << filename << std::endl;
+  ParseSdp(memblock);
+  std::cout << "Parsed SDP" << std::endl;
+  SerializeSdp();
+}
+
+} // End namespace mozilla.
+
+int main(int argc, char **argv)
+{
+  ::testing::InitGoogleTest(&argc, argv);
+
+  // A single optional argument overrides the default SDP input path.
+  if (argc == 2) {
+    mozilla::filename = argv[1];
+  } else if (argc > 2) {
+    std::cerr << "Usage: ./sdp_file_parser [filename]" << std::endl;
+    return(1);
+  }
+
+  return RUN_ALL_TESTS();
+}
diff --git a/media/webrtc/signaling/test/sdp_unittests.cpp b/media/webrtc/signaling/test/sdp_unittests.cpp
new file mode 100644
index 000000000..6d00764ae
--- /dev/null
+++ b/media/webrtc/signaling/test/sdp_unittests.cpp
@@ -0,0 +1,5377 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "timecard.h"
+
+#include "CSFLog.h"
+
+#include <string>
+#include <sstream>
+
+#define GTEST_HAS_RTTI 0
+#include "gtest/gtest.h"
+#include "gtest_utils.h"
+
+#include "nspr.h"
+#include "nss.h"
+#include "ssl.h"
+
+#include "nsThreadUtils.h"
+#include "FakeMediaStreams.h"
+#include "FakeMediaStreamsImpl.h"
+#include "FakeLogging.h"
+#include "PeerConnectionImpl.h"
+#include "PeerConnectionCtx.h"
+
+#include "mtransport_test_utils.h"
+MtransportTestUtils *test_utils;
+nsCOMPtr<nsIThread> gThread;
+
+#include "signaling/src/sdp/SipccSdpParser.h"
+#include "signaling/src/sdp/SdpMediaSection.h"
+#include "signaling/src/sdp/SdpAttribute.h"
+
+extern "C" {
+#include "signaling/src/sdp/sipcc/sdp.h"
+#include "signaling/src/sdp/sipcc/sdp_private.h"
+}
+
+#ifdef CRLF
+#undef CRLF
+#endif
+#define CRLF "\r\n"
+
+#include "FakeIPC.h"
+#include "FakeIPC.cpp"
+
+#include "TestHarness.h"
+
+using namespace mozilla;
+
+namespace test {
+
+// Lazily creates the "pseudo-main" thread and initializes the global
+// PeerConnectionCtx on it.  Returns false if thread creation fails.
+static bool SetupGlobalThread() {
+  if (!gThread) {
+    // NS_NewNamedThread returns an already-AddRef'd pointer; receiving it
+    // via getter_AddRefs avoids the extra AddRef (and resulting leak)
+    // that assigning a raw pointer to the nsCOMPtr would cause.
+    nsresult rv = NS_NewNamedThread("pseudo-main", getter_AddRefs(gThread));
+    if (NS_FAILED(rv))
+      return false;
+
+    PeerConnectionCtx::InitializeGlobal(gThread,
+                                        test_utils->sts_target());
+  }
+  return true;
+}
+
+// Fixture driving the legacy sipcc C SDP parser/builder directly.  Owns at
+// most one sdp_t (sdp_ptr_) at a time; ResetSdp() frees the previous
+// description before creating a fresh, fully-configured one.
+class SdpTest : public ::testing::Test {
+  public:
+  SdpTest() : sdp_ptr_(nullptr) {
+  }
+
+  ~SdpTest() {
+    // Guarded: sdp_ptr_ may legitimately be null if no SDP was created.
+    if (sdp_ptr_) {
+      sdp_free_description(sdp_ptr_);
+    }
+  }
+
+  static void SetUpTestCase() {
+    ASSERT_TRUE(SetupGlobalThread());
+  }
+
+  void SetUp() {
+    final_level_ = 0;
+    sdp_ptr_ = nullptr;
+  }
+
+  static void TearDownTestCase() {
+    if (gThread) {
+      gThread->Shutdown();
+    }
+    gThread = nullptr;
+  }
+
+  // Free any existing description and create a fresh one with every media,
+  // network, address and transport type this suite needs enabled.
+  void ResetSdp() {
+    // BUGFIX: was 'if (!sdp_ptr_)', which freed only when the pointer was
+    // null and leaked the previous description on every reuse.
+    if (sdp_ptr_) {
+      sdp_free_description(sdp_ptr_);
+    }
+
+    sdp_media_e supported_media[] = {
+      SDP_MEDIA_AUDIO,
+      SDP_MEDIA_VIDEO,
+      SDP_MEDIA_APPLICATION,
+      SDP_MEDIA_DATA,
+      SDP_MEDIA_CONTROL,
+      SDP_MEDIA_NAS_RADIUS,
+      SDP_MEDIA_NAS_TACACS,
+      SDP_MEDIA_NAS_DIAMETER,
+      SDP_MEDIA_NAS_L2TP,
+      SDP_MEDIA_NAS_LOGIN,
+      SDP_MEDIA_NAS_NONE,
+      SDP_MEDIA_IMAGE,
+    };
+
+    sdp_conf_options_t *config_p = sdp_init_config();
+    unsigned int i;
+    for (i = 0; i < sizeof(supported_media) / sizeof(sdp_media_e); i++) {
+      sdp_media_supported(config_p, supported_media[i], true);
+    }
+    sdp_nettype_supported(config_p, SDP_NT_INTERNET, true);
+    sdp_addrtype_supported(config_p, SDP_AT_IP4, true);
+    sdp_addrtype_supported(config_p, SDP_AT_IP6, true);
+    sdp_transport_supported(config_p, SDP_TRANSPORT_RTPSAVPF, true);
+    sdp_transport_supported(config_p, SDP_TRANSPORT_UDPTL, true);
+    sdp_require_session_name(config_p, false);
+
+    sdp_ptr_ = sdp_init_description(config_p);
+    if (!sdp_ptr_) {
+      // Creation failed, so the config was not adopted; release it.
+      sdp_free_config(config_p);
+    }
+  }
+
+  // Parse |sdp_str| into a fresh description; asserts on parse failure.
+  void ParseSdp(const std::string &sdp_str) {
+    const char *buf = sdp_str.data();
+    ResetSdp();
+    ASSERT_EQ(sdp_parse(sdp_ptr_, buf, sdp_str.size()), SDP_SUCCESS);
+  }
+
+  // Populate the session-level (v=/o=/s=/t=) lines of a locally-built SDP.
+  void InitLocalSdp() {
+    ResetSdp();
+    ASSERT_EQ(sdp_set_version(sdp_ptr_, 0), SDP_SUCCESS);
+    ASSERT_EQ(sdp_set_owner_username(sdp_ptr_, "-"), SDP_SUCCESS);
+    ASSERT_EQ(sdp_set_owner_sessionid(sdp_ptr_, "132954853"), SDP_SUCCESS);
+    ASSERT_EQ(sdp_set_owner_version(sdp_ptr_, "0"), SDP_SUCCESS);
+    ASSERT_EQ(sdp_set_owner_network_type(sdp_ptr_, SDP_NT_INTERNET),
+              SDP_SUCCESS);
+    ASSERT_EQ(sdp_set_owner_address_type(sdp_ptr_, SDP_AT_IP4), SDP_SUCCESS);
+    ASSERT_EQ(sdp_set_owner_address(sdp_ptr_, "198.51.100.7"), SDP_SUCCESS);
+    ASSERT_EQ(sdp_set_session_name(sdp_ptr_, "SDP Unit Test"), SDP_SUCCESS);
+    ASSERT_EQ(sdp_set_time_start(sdp_ptr_, "0"), SDP_SUCCESS);
+    ASSERT_EQ(sdp_set_time_stop(sdp_ptr_, "0"), SDP_SUCCESS);
+  }
+
+  // Serialize the current description to a string.
+  std::string SerializeSdp() {
+    flex_string fs;
+    flex_string_init(&fs);
+    EXPECT_EQ(sdp_build(sdp_ptr_, &fs), SDP_SUCCESS);
+    std::string body(fs.buffer);
+    flex_string_free(&fs);
+    return body;
+  }
+
+  // Returns "level" for new media section
+  int AddNewMedia(sdp_media_e type) {
+    final_level_++;
+    EXPECT_EQ(sdp_insert_media_line(sdp_ptr_, final_level_), SDP_SUCCESS);
+    EXPECT_EQ(sdp_set_conn_nettype(sdp_ptr_, final_level_, SDP_NT_INTERNET),
+              SDP_SUCCESS);
+    EXPECT_EQ(sdp_set_conn_addrtype(sdp_ptr_, final_level_, SDP_AT_IP4),
+              SDP_SUCCESS);
+    EXPECT_EQ(sdp_set_conn_address(sdp_ptr_, final_level_, "198.51.100.7"),
+              SDP_SUCCESS);
+    // BUGFIX: honor the requested media type instead of hardcoding video
+    // (every current caller passes SDP_MEDIA_VIDEO, so behavior for
+    // existing tests is unchanged).
+    EXPECT_EQ(sdp_set_media_type(sdp_ptr_, final_level_, type),
+              SDP_SUCCESS);
+    EXPECT_EQ(sdp_set_media_transport(sdp_ptr_, final_level_,
+                                      SDP_TRANSPORT_RTPAVP),
+              SDP_SUCCESS);
+    EXPECT_EQ(sdp_set_media_portnum(sdp_ptr_, final_level_, 12345, 0),
+              SDP_SUCCESS);
+    EXPECT_EQ(sdp_add_media_payload_type(sdp_ptr_, final_level_, 120,
+                                         SDP_PAYLOAD_NUMERIC),
+              SDP_SUCCESS);
+    return final_level_;
+  }
+
+  // Each AddNewRtcpFb* helper appends one a=rtcp-fb attribute at |level|
+  // and returns its instance number for later lookup.
+  uint16_t AddNewRtcpFbAck(int level, sdp_rtcp_fb_ack_type_e type,
+                           uint16_t payload = SDP_ALL_PAYLOADS) {
+    uint16_t inst_num = 0;
+    EXPECT_EQ(sdp_add_new_attr(sdp_ptr_, level, 0, SDP_ATTR_RTCP_FB,
+                               &inst_num), SDP_SUCCESS);
+    EXPECT_EQ(sdp_attr_set_rtcp_fb_ack(sdp_ptr_, level, payload, inst_num,
+                                       type), SDP_SUCCESS);
+    return inst_num;
+  }
+
+  uint16_t AddNewRtcpFbNack(int level, sdp_rtcp_fb_nack_type_e type,
+                            uint16_t payload = SDP_ALL_PAYLOADS) {
+    uint16_t inst_num = 0;
+    EXPECT_EQ(sdp_add_new_attr(sdp_ptr_, level, 0, SDP_ATTR_RTCP_FB,
+                               &inst_num), SDP_SUCCESS);
+    EXPECT_EQ(sdp_attr_set_rtcp_fb_nack(sdp_ptr_, level, payload, inst_num,
+                                        type), SDP_SUCCESS);
+    return inst_num;
+  }
+
+  uint16_t AddNewRtcpFbTrrInt(int level, uint32_t interval,
+                              uint16_t payload = SDP_ALL_PAYLOADS) {
+    uint16_t inst_num = 0;
+    EXPECT_EQ(sdp_add_new_attr(sdp_ptr_, level, 0, SDP_ATTR_RTCP_FB,
+                               &inst_num), SDP_SUCCESS);
+    EXPECT_EQ(sdp_attr_set_rtcp_fb_trr_int(sdp_ptr_, level, payload, inst_num,
+                                           interval), SDP_SUCCESS);
+    return inst_num;
+  }
+
+  uint16_t AddNewRtcpFbRemb(int level,
+                            uint16_t payload = SDP_ALL_PAYLOADS) {
+    uint16_t inst_num = 0;
+    EXPECT_EQ(sdp_add_new_attr(sdp_ptr_, level, 0, SDP_ATTR_RTCP_FB,
+                               &inst_num), SDP_SUCCESS);
+    EXPECT_EQ(sdp_attr_set_rtcp_fb_remb(sdp_ptr_, level, payload, inst_num
+                                        ), SDP_SUCCESS);
+    return inst_num;
+  }
+
+  uint16_t AddNewRtcpFbCcm(int level, sdp_rtcp_fb_ccm_type_e type,
+                           uint16_t payload = SDP_ALL_PAYLOADS) {
+    uint16_t inst_num = 0;
+    EXPECT_EQ(sdp_add_new_attr(sdp_ptr_, level, 0, SDP_ATTR_RTCP_FB,
+                               &inst_num), SDP_SUCCESS);
+    EXPECT_EQ(sdp_attr_set_rtcp_fb_ccm(sdp_ptr_, level, payload, inst_num,
+                                       type), SDP_SUCCESS);
+    return inst_num;
+  }
+
+  // NOTE(review): inst_num is passed both as the attribute instance and as
+  // the extmap id argument here — confirm against sdp_attr_set_extmap's
+  // signature that this is intended.
+  uint16_t AddNewExtMap(int level, const char* uri) {
+    uint16_t inst_num = 0;
+    EXPECT_EQ(sdp_add_new_attr(sdp_ptr_, level, 0, SDP_ATTR_EXTMAP,
+                               &inst_num), SDP_SUCCESS);
+    EXPECT_EQ(sdp_attr_set_extmap(sdp_ptr_, level, inst_num,
+                                  uri, inst_num), SDP_SUCCESS);
+    return inst_num;
+  }
+
+  // The AddNewFmtp* helpers attach an a=fmtp for payload 120 carrying
+  // max-fs and/or max-fr parameters.
+  uint16_t AddNewFmtpMaxFs(int level, uint32_t max_fs) {
+    uint16_t inst_num = 0;
+    EXPECT_EQ(sdp_add_new_attr(sdp_ptr_, level, 0, SDP_ATTR_FMTP,
+                               &inst_num), SDP_SUCCESS);
+    EXPECT_EQ(sdp_attr_set_fmtp_payload_type(sdp_ptr_, level, 0, inst_num,
+                                             120), SDP_SUCCESS);
+    EXPECT_EQ(sdp_attr_set_fmtp_max_fs(sdp_ptr_, level, 0, inst_num, max_fs),
+              SDP_SUCCESS);
+    return inst_num;
+  }
+
+  uint16_t AddNewFmtpMaxFr(int level, uint32_t max_fr) {
+    uint16_t inst_num = 0;
+    EXPECT_EQ(sdp_add_new_attr(sdp_ptr_, level, 0, SDP_ATTR_FMTP,
+                               &inst_num), SDP_SUCCESS);
+    EXPECT_EQ(sdp_attr_set_fmtp_payload_type(sdp_ptr_, level, 0, inst_num,
+                                             120), SDP_SUCCESS);
+    EXPECT_EQ(sdp_attr_set_fmtp_max_fr(sdp_ptr_, level, 0, inst_num, max_fr),
+              SDP_SUCCESS);
+    return inst_num;
+  }
+
+  uint16_t AddNewFmtpMaxFsFr(int level, uint32_t max_fs, uint32_t max_fr) {
+    uint16_t inst_num = 0;
+    EXPECT_EQ(sdp_add_new_attr(sdp_ptr_, level, 0, SDP_ATTR_FMTP,
+                               &inst_num), SDP_SUCCESS);
+    EXPECT_EQ(sdp_attr_set_fmtp_payload_type(sdp_ptr_, level, 0, inst_num,
+                                             120), SDP_SUCCESS);
+    EXPECT_EQ(sdp_attr_set_fmtp_max_fs(sdp_ptr_, level, 0, inst_num, max_fs),
+              SDP_SUCCESS);
+    EXPECT_EQ(sdp_attr_set_fmtp_max_fr(sdp_ptr_, level, 0, inst_num, max_fr),
+              SDP_SUCCESS);
+    return inst_num;
+  }
+
+  protected:
+  int final_level_;
+  sdp_t *sdp_ptr_;
+};
+
+// Minimal video-only SDP used as the base for the rtcp-fb parse tests.
+static const std::string kVideoSdp =
+  "v=0\r\n"
+  "o=- 4294967296 2 IN IP4 127.0.0.1\r\n"
+  "s=SIP Call\r\n"
+  "c=IN IP4 198.51.100.7\r\n"
+  "t=0 0\r\n"
+  "m=video 56436 RTP/SAVPF 120\r\n"
+  "a=rtpmap:120 VP8/90000\r\n";
+
+TEST_F(SdpTest, parseRtcpFbAckRpsi) {
+ ParseSdp(kVideoSdp + "a=rtcp-fb:120 ack rpsi\r\n");
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_ack(sdp_ptr_, 1, 120, 1),
+ SDP_RTCP_FB_ACK_RPSI);
+}
+
+TEST_F(SdpTest, parseRtcpFbAckApp) {
+ ParseSdp(kVideoSdp + "a=rtcp-fb:120 ack app\r\n");
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_ack(sdp_ptr_, 1, 120, 1), SDP_RTCP_FB_ACK_APP);
+}
+
+TEST_F(SdpTest, parseRtcpFbAckAppFoo) {
+ ParseSdp(kVideoSdp + "a=rtcp-fb:120 ack app foo\r\n");
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_ack(sdp_ptr_, 1, 120, 1), SDP_RTCP_FB_ACK_APP);
+}
+
+TEST_F(SdpTest, parseRtcpFbAckFooBar) {
+ ParseSdp(kVideoSdp + "a=rtcp-fb:120 ack foo bar\r\n");
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_ack(sdp_ptr_, 1, 120, 1),
+ SDP_RTCP_FB_ACK_UNKNOWN);
+}
+
+TEST_F(SdpTest, parseRtcpFbAckFooBarBaz) {
+ ParseSdp(kVideoSdp + "a=rtcp-fb:120 ack foo bar baz\r\n");
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_ack(sdp_ptr_, 1, 120, 1),
+ SDP_RTCP_FB_ACK_UNKNOWN);
+}
+
+TEST_F(SdpTest, parseRtcpFbNack) {
+ ParseSdp(kVideoSdp + "a=rtcp-fb:120 nack\r\n");
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_nack(sdp_ptr_, 1, 120, 1),
+ SDP_RTCP_FB_NACK_BASIC);
+}
+
+TEST_F(SdpTest, parseRtcpFbNackPli) {
+ ParseSdp(kVideoSdp + "a=rtcp-fb:120 nack pli\r\n");
+}
+
+TEST_F(SdpTest, parseRtcpFbNackSli) {
+ ParseSdp(kVideoSdp + "a=rtcp-fb:120 nack sli\r\n");
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_nack(sdp_ptr_, 1, 120, 1),
+ SDP_RTCP_FB_NACK_SLI);
+}
+
+TEST_F(SdpTest, parseRtcpFbNackRpsi) {
+ ParseSdp(kVideoSdp + "a=rtcp-fb:120 nack rpsi\r\n");
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_nack(sdp_ptr_, 1, 120, 1),
+ SDP_RTCP_FB_NACK_RPSI);
+}
+
+TEST_F(SdpTest, parseRtcpFbNackApp) {
+ ParseSdp(kVideoSdp + "a=rtcp-fb:120 nack app\r\n");
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_nack(sdp_ptr_, 1, 120, 1),
+ SDP_RTCP_FB_NACK_APP);
+}
+
+TEST_F(SdpTest, parseRtcpFbNackAppFoo) {
+ ParseSdp(kVideoSdp + "a=rtcp-fb:120 nack app foo\r\n");
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_nack(sdp_ptr_, 1, 120, 1),
+ SDP_RTCP_FB_NACK_APP);
+}
+
+TEST_F(SdpTest, parseRtcpFbNackAppFooBar) {
+ ParseSdp(kVideoSdp + "a=rtcp-fb:120 nack app foo bar\r\n");
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_nack(sdp_ptr_, 1, 120, 1),
+ SDP_RTCP_FB_NACK_APP);
+}
+
+TEST_F(SdpTest, parseRtcpFbNackFooBarBaz) {
+ ParseSdp(kVideoSdp + "a=rtcp-fb:120 nack foo bar baz\r\n");
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_nack(sdp_ptr_, 1, 120, 1),
+ SDP_RTCP_FB_NACK_UNKNOWN);
+}
+
+TEST_F(SdpTest, parseRtcpFbRemb) {
+ ParseSdp(kVideoSdp + "a=rtcp-fb:120 goog-remb\r\n");
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_remb_enabled(sdp_ptr_, 1, 120), true);
+}
+
+TEST_F(SdpTest, parseRtcpRbRembAllPt) {
+ ParseSdp(kVideoSdp + "a=rtcp-fb:* goog-remb\r\n");
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_remb_enabled(sdp_ptr_, 1, SDP_ALL_PAYLOADS),
+ true);
+}
+
+TEST_F(SdpTest, parseRtcpFbTrrInt0) {
+ ParseSdp(kVideoSdp + "a=rtcp-fb:120 trr-int 0\r\n");
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_trr_int(sdp_ptr_, 1, 120, 1), 0U);
+}
+
+TEST_F(SdpTest, parseRtcpFbTrrInt123) {
+ ParseSdp(kVideoSdp + "a=rtcp-fb:120 trr-int 123\r\n");
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_trr_int(sdp_ptr_, 1, 120, 1), 123U);
+}
+
+TEST_F(SdpTest, parseRtcpFbCcmFir) {
+ ParseSdp(kVideoSdp + "a=rtcp-fb:120 ccm fir\r\n");
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_ccm(sdp_ptr_, 1, 120, 1), SDP_RTCP_FB_CCM_FIR);
+}
+
+TEST_F(SdpTest, parseRtcpFbCcmTmmbr) {
+ ParseSdp(kVideoSdp + "a=rtcp-fb:120 ccm tmmbr\r\n");
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_ccm(sdp_ptr_, 1, 120, 1),
+ SDP_RTCP_FB_CCM_TMMBR);
+}
+
+TEST_F(SdpTest, parseRtcpFbCcmTmmbrSmaxpr) {
+ ParseSdp(kVideoSdp + "a=rtcp-fb:120 ccm tmmbr smaxpr=456\r\n");
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_ccm(sdp_ptr_, 1, 120, 1),
+ SDP_RTCP_FB_CCM_TMMBR);
+}
+
+TEST_F(SdpTest, parseRtcpFbCcmTstr) {
+ ParseSdp(kVideoSdp + "a=rtcp-fb:120 ccm tstr\r\n");
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_ccm(sdp_ptr_, 1, 120, 1),
+ SDP_RTCP_FB_CCM_TSTR);
+}
+
+TEST_F(SdpTest, parseRtcpFbCcmVbcm) {
+ ParseSdp(kVideoSdp + "a=rtcp-fb:120 ccm vbcm 123 456 789\r\n");
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_ccm(sdp_ptr_, 1, 120, 1),
+ SDP_RTCP_FB_CCM_VBCM);
+ // We don't currently parse out VBCM submessage types, since we don't have
+ // any use for them.
+}
+
+TEST_F(SdpTest, parseRtcpFbCcmFoo) {
+ ParseSdp(kVideoSdp + "a=rtcp-fb:120 ccm foo\r\n");
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_ccm(sdp_ptr_, 1, 120, 1),
+ SDP_RTCP_FB_CCM_UNKNOWN);
+}
+
+TEST_F(SdpTest, parseRtcpFbCcmFooBarBaz) {
+ ParseSdp(kVideoSdp + "a=rtcp-fb:120 ccm foo bar baz\r\n");
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_ccm(sdp_ptr_, 1, 120, 1),
+ SDP_RTCP_FB_CCM_UNKNOWN);
+}
+
+TEST_F(SdpTest, parseRtcpFbFoo) {
+ ParseSdp(kVideoSdp + "a=rtcp-fb:120 foo\r\n");
+}
+
+TEST_F(SdpTest, parseRtcpFbFooBar) {
+ ParseSdp(kVideoSdp + "a=rtcp-fb:120 foo bar\r\n");
+}
+
+TEST_F(SdpTest, parseRtcpFbFooBarBaz) {
+ ParseSdp(kVideoSdp + "a=rtcp-fb:120 foo bar baz\r\n");
+}
+
+// SDP whose a=fmtp line references a payload type declared only afterward,
+// carries a parameter ("unknown") we do not parse, and ends in a bare "\n"
+// — the parser must tolerate all of this.  (Identifier typo fixed:
+// "Unknonw...Ftmp" -> "Unknown...Fmtp"; sole use is updated below.)
+static const std::string kVideoSdpWithUnknownBrokenFmtp =
+  "v=0\r\n"
+  "o=- 4294967296 2 IN IP4 127.0.0.1\r\n"
+  "s=SIP Call\r\n"
+  "c=IN IP4 198.51.100.7\r\n"
+  "t=0 0\r\n"
+  "m=video 56436 RTP/SAVPF 120\r\n"
+  "a=rtpmap:120 VP8/90000\r\n"
+  "a=fmtp:122 unknown=10\n"
+  "a=rtpmap:122 red/90000\r\n";
+
+TEST_F(SdpTest, parseUnknownBrokenFtmp) {
+  ParseSdp(kVideoSdpWithUnknownBrokenFmtp);
+}
+
+TEST_F(SdpTest, parseRtcpFbKitchenSink) {
+ ParseSdp(kVideoSdp +
+ "a=rtcp-fb:120 ack rpsi\r\n"
+ "a=rtcp-fb:120 ack app\r\n"
+ "a=rtcp-fb:120 ack app foo\r\n"
+ "a=rtcp-fb:120 ack foo bar\r\n"
+ "a=rtcp-fb:120 ack foo bar baz\r\n"
+ "a=rtcp-fb:120 nack\r\n"
+ "a=rtcp-fb:120 nack pli\r\n"
+ "a=rtcp-fb:120 nack sli\r\n"
+ "a=rtcp-fb:120 nack rpsi\r\n"
+ "a=rtcp-fb:120 nack app\r\n"
+ "a=rtcp-fb:120 nack app foo\r\n"
+ "a=rtcp-fb:120 nack app foo bar\r\n"
+ "a=rtcp-fb:120 nack foo bar baz\r\n"
+ "a=rtcp-fb:120 trr-int 0\r\n"
+ "a=rtcp-fb:120 trr-int 123\r\n"
+ "a=rtcp-fb:120 goog-remb\r\n"
+ "a=rtcp-fb:120 ccm fir\r\n"
+ "a=rtcp-fb:120 ccm tmmbr\r\n"
+ "a=rtcp-fb:120 ccm tmmbr smaxpr=456\r\n"
+ "a=rtcp-fb:120 ccm tstr\r\n"
+ "a=rtcp-fb:120 ccm vbcm 123 456 789\r\n"
+ "a=rtcp-fb:120 ccm foo\r\n"
+ "a=rtcp-fb:120 ccm foo bar baz\r\n"
+ "a=rtcp-fb:120 foo\r\n"
+ "a=rtcp-fb:120 foo bar\r\n"
+ "a=rtcp-fb:120 foo bar baz\r\n");
+
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_ack(sdp_ptr_, 1, 120, 1), SDP_RTCP_FB_ACK_RPSI);
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_ack(sdp_ptr_, 1, 120, 2), SDP_RTCP_FB_ACK_APP);
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_ack(sdp_ptr_, 1, 120, 3), SDP_RTCP_FB_ACK_APP);
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_ack(sdp_ptr_, 1, 120, 4),
+ SDP_RTCP_FB_ACK_UNKNOWN);
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_ack(sdp_ptr_, 1, 120, 5),
+ SDP_RTCP_FB_ACK_UNKNOWN);
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_ack(sdp_ptr_, 1, 120, 6),
+ SDP_RTCP_FB_ACK_NOT_FOUND);
+
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_nack(sdp_ptr_, 1, 120, 1),
+ SDP_RTCP_FB_NACK_BASIC);
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_nack(sdp_ptr_, 1, 120, 2),
+ SDP_RTCP_FB_NACK_PLI);
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_nack(sdp_ptr_, 1, 120, 3),
+ SDP_RTCP_FB_NACK_SLI);
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_nack(sdp_ptr_, 1, 120, 4),
+ SDP_RTCP_FB_NACK_RPSI);
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_nack(sdp_ptr_, 1, 120, 5),
+ SDP_RTCP_FB_NACK_APP);
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_nack(sdp_ptr_, 1, 120, 6),
+ SDP_RTCP_FB_NACK_APP);
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_nack(sdp_ptr_, 1, 120, 7),
+ SDP_RTCP_FB_NACK_APP);
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_nack(sdp_ptr_, 1, 120, 8),
+ SDP_RTCP_FB_NACK_UNKNOWN);
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_nack(sdp_ptr_, 1, 120, 9),
+ SDP_RTCP_FB_NACK_NOT_FOUND);
+
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_trr_int(sdp_ptr_, 1, 120, 1), 0U);
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_trr_int(sdp_ptr_, 1, 120, 2), 123U);
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_trr_int(sdp_ptr_, 1, 120, 3), 0xFFFFFFFF);
+
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_remb_enabled(sdp_ptr_, 1, 120), true);
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_remb_enabled(sdp_ptr_, 2, 120), false);
+
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_ccm(sdp_ptr_, 1, 120, 1), SDP_RTCP_FB_CCM_FIR);
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_ccm(sdp_ptr_, 1, 120, 2),
+ SDP_RTCP_FB_CCM_TMMBR);
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_ccm(sdp_ptr_, 1, 120, 3),
+ SDP_RTCP_FB_CCM_TMMBR);
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_ccm(sdp_ptr_, 1, 120, 4),
+ SDP_RTCP_FB_CCM_TSTR);
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_ccm(sdp_ptr_, 1, 120, 5),
+ SDP_RTCP_FB_CCM_VBCM);
+ // We don't currently parse out VBCM submessage types, since we don't have
+ // any use for them.
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_ccm(sdp_ptr_, 1, 120, 6),
+ SDP_RTCP_FB_CCM_UNKNOWN);
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_ccm(sdp_ptr_, 1, 120, 7),
+ SDP_RTCP_FB_CCM_UNKNOWN);
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_ccm(sdp_ptr_, 1, 120, 8),
+ SDP_RTCP_FB_CCM_NOT_FOUND);
+}
+
+TEST_F(SdpTest, addRtcpFbAckRpsi) {
+ InitLocalSdp();
+ int level = AddNewMedia(SDP_MEDIA_VIDEO);
+ AddNewRtcpFbAck(level, SDP_RTCP_FB_ACK_RPSI, 120);
+ std::string body = SerializeSdp();
+ ASSERT_NE(body.find("a=rtcp-fb:120 ack rpsi\r\n"), std::string::npos);
+}
+
+TEST_F(SdpTest, addRtcpFbAckRpsiAllPt) {
+ InitLocalSdp();
+ int level = AddNewMedia(SDP_MEDIA_VIDEO);
+ AddNewRtcpFbAck(level, SDP_RTCP_FB_ACK_RPSI);
+ std::string body = SerializeSdp();
+ ASSERT_NE(body.find("a=rtcp-fb:* ack rpsi\r\n"), std::string::npos);
+}
+
+TEST_F(SdpTest, addRtcpFbAckApp) {
+ InitLocalSdp();
+ int level = AddNewMedia(SDP_MEDIA_VIDEO);
+ AddNewRtcpFbAck(level, SDP_RTCP_FB_ACK_APP, 120);
+ std::string body = SerializeSdp();
+ ASSERT_NE(body.find("a=rtcp-fb:120 ack app\r\n"), std::string::npos);
+}
+
+TEST_F(SdpTest, addRtcpFbAckAppAllPt) {
+ InitLocalSdp();
+ int level = AddNewMedia(SDP_MEDIA_VIDEO);
+ AddNewRtcpFbAck(level, SDP_RTCP_FB_ACK_APP);
+ std::string body = SerializeSdp();
+ ASSERT_NE(body.find("a=rtcp-fb:* ack app\r\n"), std::string::npos);
+}
+
+// Serialization tests for the a=rtcp-fb "nack" feedback variants (RFC 4585).
+// Each variant is exercised twice: once with an explicit payload type (120)
+// and once with the wildcard payload type (the no-argument overload -> "*").
+TEST_F(SdpTest, addRtcpFbNack) {
+ InitLocalSdp();
+ int level = AddNewMedia(SDP_MEDIA_VIDEO);
+ AddNewRtcpFbNack(level, SDP_RTCP_FB_NACK_BASIC, 120);
+ std::string body = SerializeSdp();
+ ASSERT_NE(body.find("a=rtcp-fb:120 nack\r\n"), std::string::npos);
+}
+
+TEST_F(SdpTest, addRtcpFbNackAllPt) {
+ InitLocalSdp();
+ int level = AddNewMedia(SDP_MEDIA_VIDEO);
+ AddNewRtcpFbNack(level, SDP_RTCP_FB_NACK_BASIC);
+ std::string body = SerializeSdp();
+ ASSERT_NE(body.find("a=rtcp-fb:* nack\r\n"), std::string::npos);
+}
+
+TEST_F(SdpTest, addRtcpFbNackSli) {
+ InitLocalSdp();
+ int level = AddNewMedia(SDP_MEDIA_VIDEO);
+ AddNewRtcpFbNack(level, SDP_RTCP_FB_NACK_SLI, 120);
+ std::string body = SerializeSdp();
+ ASSERT_NE(body.find("a=rtcp-fb:120 nack sli\r\n"), std::string::npos);
+}
+
+TEST_F(SdpTest, addRtcpFbNackSliAllPt) {
+ InitLocalSdp();
+ int level = AddNewMedia(SDP_MEDIA_VIDEO);
+ AddNewRtcpFbNack(level, SDP_RTCP_FB_NACK_SLI);
+ std::string body = SerializeSdp();
+ ASSERT_NE(body.find("a=rtcp-fb:* nack sli\r\n"), std::string::npos);
+}
+
+TEST_F(SdpTest, addRtcpFbNackPli) {
+ InitLocalSdp();
+ int level = AddNewMedia(SDP_MEDIA_VIDEO);
+ AddNewRtcpFbNack(level, SDP_RTCP_FB_NACK_PLI, 120);
+ std::string body = SerializeSdp();
+ ASSERT_NE(body.find("a=rtcp-fb:120 nack pli\r\n"), std::string::npos);
+}
+
+TEST_F(SdpTest, addRtcpFbNackPliAllPt) {
+ InitLocalSdp();
+ int level = AddNewMedia(SDP_MEDIA_VIDEO);
+ AddNewRtcpFbNack(level, SDP_RTCP_FB_NACK_PLI);
+ std::string body = SerializeSdp();
+ ASSERT_NE(body.find("a=rtcp-fb:* nack pli\r\n"), std::string::npos);
+}
+
+TEST_F(SdpTest, addRtcpFbNackRpsi) {
+ InitLocalSdp();
+ int level = AddNewMedia(SDP_MEDIA_VIDEO);
+ AddNewRtcpFbNack(level, SDP_RTCP_FB_NACK_RPSI, 120);
+ std::string body = SerializeSdp();
+ ASSERT_NE(body.find("a=rtcp-fb:120 nack rpsi\r\n"), std::string::npos);
+}
+
+TEST_F(SdpTest, addRtcpFbNackRpsiAllPt) {
+ InitLocalSdp();
+ int level = AddNewMedia(SDP_MEDIA_VIDEO);
+ AddNewRtcpFbNack(level, SDP_RTCP_FB_NACK_RPSI);
+ std::string body = SerializeSdp();
+ ASSERT_NE(body.find("a=rtcp-fb:* nack rpsi\r\n"), std::string::npos);
+}
+
+TEST_F(SdpTest, addRtcpFbNackApp) {
+ InitLocalSdp();
+ int level = AddNewMedia(SDP_MEDIA_VIDEO);
+ AddNewRtcpFbNack(level, SDP_RTCP_FB_NACK_APP, 120);
+ std::string body = SerializeSdp();
+ ASSERT_NE(body.find("a=rtcp-fb:120 nack app\r\n"), std::string::npos);
+}
+
+TEST_F(SdpTest, addRtcpFbNackAppAllPt) {
+ InitLocalSdp();
+ int level = AddNewMedia(SDP_MEDIA_VIDEO);
+ AddNewRtcpFbNack(level, SDP_RTCP_FB_NACK_APP);
+ std::string body = SerializeSdp();
+ ASSERT_NE(body.find("a=rtcp-fb:* nack app\r\n"), std::string::npos);
+}
+
+TEST_F(SdpTest, addRtcpFbNackRai) {
+ InitLocalSdp();
+ int level = AddNewMedia(SDP_MEDIA_VIDEO);
+ AddNewRtcpFbNack(level, SDP_RTCP_FB_NACK_RAI, 120);
+ std::string body = SerializeSdp();
+ ASSERT_NE(body.find("a=rtcp-fb:120 nack rai\r\n"), std::string::npos);
+}
+
+TEST_F(SdpTest, addRtcpFbNackRaiAllPt) {
+ InitLocalSdp();
+ int level = AddNewMedia(SDP_MEDIA_VIDEO);
+ AddNewRtcpFbNack(level, SDP_RTCP_FB_NACK_RAI);
+ std::string body = SerializeSdp();
+ ASSERT_NE(body.find("a=rtcp-fb:* nack rai\r\n"), std::string::npos);
+}
+
+TEST_F(SdpTest, addRtcpFbNackTllei) {
+ InitLocalSdp();
+ int level = AddNewMedia(SDP_MEDIA_VIDEO);
+ AddNewRtcpFbNack(level, SDP_RTCP_FB_NACK_TLLEI, 120);
+ std::string body = SerializeSdp();
+ ASSERT_NE(body.find("a=rtcp-fb:120 nack tllei\r\n"), std::string::npos);
+}
+
+TEST_F(SdpTest, addRtcpFbNackTlleiAllPt) {
+ InitLocalSdp();
+ int level = AddNewMedia(SDP_MEDIA_VIDEO);
+ AddNewRtcpFbNack(level, SDP_RTCP_FB_NACK_TLLEI);
+ std::string body = SerializeSdp();
+ ASSERT_NE(body.find("a=rtcp-fb:* nack tllei\r\n"), std::string::npos);
+}
+
+TEST_F(SdpTest, addRtcpFbNackPslei) {
+ InitLocalSdp();
+ int level = AddNewMedia(SDP_MEDIA_VIDEO);
+ AddNewRtcpFbNack(level, SDP_RTCP_FB_NACK_PSLEI, 120);
+ std::string body = SerializeSdp();
+ ASSERT_NE(body.find("a=rtcp-fb:120 nack pslei\r\n"), std::string::npos);
+}
+
+TEST_F(SdpTest, addRtcpFbNackPsleiAllPt) {
+ InitLocalSdp();
+ int level = AddNewMedia(SDP_MEDIA_VIDEO);
+ AddNewRtcpFbNack(level, SDP_RTCP_FB_NACK_PSLEI);
+ std::string body = SerializeSdp();
+ ASSERT_NE(body.find("a=rtcp-fb:* nack pslei\r\n"), std::string::npos);
+}
+
+TEST_F(SdpTest, addRtcpFbNackEcn) {
+ InitLocalSdp();
+ int level = AddNewMedia(SDP_MEDIA_VIDEO);
+ AddNewRtcpFbNack(level, SDP_RTCP_FB_NACK_ECN, 120);
+ std::string body = SerializeSdp();
+ ASSERT_NE(body.find("a=rtcp-fb:120 nack ecn\r\n"), std::string::npos);
+}
+
+TEST_F(SdpTest, addRtcpFbNackEcnAllPt) {
+ InitLocalSdp();
+ int level = AddNewMedia(SDP_MEDIA_VIDEO);
+ AddNewRtcpFbNack(level, SDP_RTCP_FB_NACK_ECN);
+ std::string body = SerializeSdp();
+ ASSERT_NE(body.find("a=rtcp-fb:* nack ecn\r\n"), std::string::npos);
+}
+
+// Serialization tests for goog-remb and trr-int rtcp-fb attributes, with a
+// specific payload type and with the wildcard payload type.
+TEST_F(SdpTest, addRtcpFbRemb) {
+ InitLocalSdp();
+ int level = AddNewMedia(SDP_MEDIA_VIDEO);
+ AddNewRtcpFbRemb(level, 120);
+ std::string body = SerializeSdp();
+ ASSERT_NE(body.find("a=rtcp-fb:120 goog-remb\r\n"), std::string::npos);
+}
+
+TEST_F(SdpTest, addRtcpFbRembAllPt) {
+ InitLocalSdp();
+ int level = AddNewMedia(SDP_MEDIA_VIDEO);
+ AddNewRtcpFbRemb(level);
+ std::string body = SerializeSdp();
+ ASSERT_NE(body.find("a=rtcp-fb:* goog-remb\r\n"), std::string::npos);
+}
+
+TEST_F(SdpTest, addRtcpFbTrrInt) {
+ InitLocalSdp();
+ int level = AddNewMedia(SDP_MEDIA_VIDEO);
+ AddNewRtcpFbTrrInt(level, 12345, 120);
+ std::string body = SerializeSdp();
+ ASSERT_NE(body.find("a=rtcp-fb:120 trr-int 12345\r\n"), std::string::npos);
+}
+
+// NOTE(review): test name says "Nack" but this exercises trr-int, not a nack
+// variant — presumably a copy/paste slip in the name; behavior is correct.
+TEST_F(SdpTest, addRtcpFbNackTrrIntAllPt) {
+ InitLocalSdp();
+ int level = AddNewMedia(SDP_MEDIA_VIDEO);
+ AddNewRtcpFbTrrInt(level, 0);
+ std::string body = SerializeSdp();
+ ASSERT_NE(body.find("a=rtcp-fb:* trr-int 0\r\n"), std::string::npos);
+}
+
+// Serialization tests for the a=rtcp-fb "ccm" (codec control message) variants
+// (fir/tmmbr/tstr/vbcm), each with payload type 120 and with the wildcard.
+TEST_F(SdpTest, addRtcpFbCcmFir) {
+ InitLocalSdp();
+ int level = AddNewMedia(SDP_MEDIA_VIDEO);
+ AddNewRtcpFbCcm(level, SDP_RTCP_FB_CCM_FIR, 120);
+ std::string body = SerializeSdp();
+ ASSERT_NE(body.find("a=rtcp-fb:120 ccm fir\r\n"), std::string::npos);
+}
+
+TEST_F(SdpTest, addRtcpFbCcmFirAllPt) {
+ InitLocalSdp();
+ int level = AddNewMedia(SDP_MEDIA_VIDEO);
+ AddNewRtcpFbCcm(level, SDP_RTCP_FB_CCM_FIR);
+ std::string body = SerializeSdp();
+ ASSERT_NE(body.find("a=rtcp-fb:* ccm fir\r\n"), std::string::npos);
+}
+
+TEST_F(SdpTest, addRtcpFbCcmTmmbr) {
+ InitLocalSdp();
+ int level = AddNewMedia(SDP_MEDIA_VIDEO);
+ AddNewRtcpFbCcm(level, SDP_RTCP_FB_CCM_TMMBR, 120);
+ std::string body = SerializeSdp();
+ ASSERT_NE(body.find("a=rtcp-fb:120 ccm tmmbr\r\n"), std::string::npos);
+}
+
+TEST_F(SdpTest, addRtcpFbCcmTmmbrAllPt) {
+ InitLocalSdp();
+ int level = AddNewMedia(SDP_MEDIA_VIDEO);
+ AddNewRtcpFbCcm(level, SDP_RTCP_FB_CCM_TMMBR);
+ std::string body = SerializeSdp();
+ ASSERT_NE(body.find("a=rtcp-fb:* ccm tmmbr\r\n"), std::string::npos);
+}
+
+TEST_F(SdpTest, addRtcpFbCcmTstr) {
+ InitLocalSdp();
+ int level = AddNewMedia(SDP_MEDIA_VIDEO);
+ AddNewRtcpFbCcm(level, SDP_RTCP_FB_CCM_TSTR, 120);
+ std::string body = SerializeSdp();
+ ASSERT_NE(body.find("a=rtcp-fb:120 ccm tstr\r\n"), std::string::npos);
+}
+
+TEST_F(SdpTest, addRtcpFbCcmTstrAllPt) {
+ InitLocalSdp();
+ int level = AddNewMedia(SDP_MEDIA_VIDEO);
+ AddNewRtcpFbCcm(level, SDP_RTCP_FB_CCM_TSTR);
+ std::string body = SerializeSdp();
+ ASSERT_NE(body.find("a=rtcp-fb:* ccm tstr\r\n"), std::string::npos);
+}
+
+TEST_F(SdpTest, addRtcpFbCcmVbcm) {
+ InitLocalSdp();
+ int level = AddNewMedia(SDP_MEDIA_VIDEO);
+ AddNewRtcpFbCcm(level, SDP_RTCP_FB_CCM_VBCM, 120);
+ std::string body = SerializeSdp();
+ ASSERT_NE(body.find("a=rtcp-fb:120 ccm vbcm\r\n"), std::string::npos);
+}
+
+TEST_F(SdpTest, addRtcpFbCcmVbcmAllPt) {
+ InitLocalSdp();
+ int level = AddNewMedia(SDP_MEDIA_VIDEO);
+ AddNewRtcpFbCcm(level, SDP_RTCP_FB_CCM_VBCM);
+ std::string body = SerializeSdp();
+ ASSERT_NE(body.find("a=rtcp-fb:* ccm vbcm\r\n"), std::string::npos);
+}
+
+// Parsing a wildcard rtcp-fb line must make the attribute visible when queried
+// for every payload type value (0..127).
+TEST_F(SdpTest, parseRtcpFbAllPayloads) {
+ ParseSdp(kVideoSdp + "a=rtcp-fb:* ack rpsi\r\n");
+ for (int i = 0; i < 128; i++) {
+ ASSERT_EQ(sdp_attr_get_rtcp_fb_ack(sdp_ptr_, 1, i, 1),
+ SDP_RTCP_FB_ACK_RPSI);
+ }
+}
+// Round-trip tests for a=extmap (RTP header extension mapping).
+TEST_F(SdpTest, addExtMap) {
+ InitLocalSdp();
+ int level = AddNewMedia(SDP_MEDIA_VIDEO);
+ AddNewExtMap(level, SDP_EXTMAP_AUDIO_LEVEL);
+ std::string body = SerializeSdp();
+ ASSERT_NE(body.find("a=extmap:1 urn:ietf:params:rtp-hdrext:ssrc-audio-level\r\n"), std::string::npos);
+}
+
+TEST_F(SdpTest, parseExtMap) {
+ ParseSdp(kVideoSdp +
+ "a=extmap:1 urn:ietf:params:rtp-hdrext:ssrc-audio-level\r\n");
+ ASSERT_STREQ(sdp_attr_get_extmap_uri(sdp_ptr_, 1, 1),
+ SDP_EXTMAP_AUDIO_LEVEL);
+ ASSERT_EQ(sdp_attr_get_extmap_id(sdp_ptr_, 1, 1),
+ 1);
+
+}
+
+// fmtp parse tests for simple bounded numeric parameters. The pattern is:
+// an in-range value round-trips, while an out-of-range value (0 and/or one
+// past the max) makes the getter return SDP_INVALID_VALUE.
+TEST_F(SdpTest, parseFmtpBitrate) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 bitrate=400\r\n");
+ ASSERT_EQ(400, sdp_attr_get_fmtp_bitrate_type(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpBitrateWith0) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 bitrate=0\r\n");
+ ASSERT_EQ(SDP_INVALID_VALUE, sdp_attr_get_fmtp_bitrate_type(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpBitrateWith32001) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 bitrate=32001\r\n");
+ ASSERT_EQ(32001, sdp_attr_get_fmtp_bitrate_type(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpBitrateWith4294967296) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 bitrate=4294967296\r\n");
+ ASSERT_EQ(SDP_INVALID_VALUE, sdp_attr_get_fmtp_bitrate_type(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpMode) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 mode=200\r\n");
+ ASSERT_EQ(200U, sdp_attr_get_fmtp_mode_for_payload_type(sdp_ptr_, 1, 0, 120));
+}
+
+TEST_F(SdpTest, parseFmtpModeWith4294967295) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 mode=4294967295\r\n");
+ ASSERT_EQ(4294967295, sdp_attr_get_fmtp_mode_for_payload_type(sdp_ptr_, 1, 0, 120));
+}
+
+TEST_F(SdpTest, parseFmtpModeWith4294967296) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 mode=4294967296\r\n");
+ // returns 0 if not found
+ ASSERT_EQ(0U, sdp_attr_get_fmtp_mode_for_payload_type(sdp_ptr_, 1, 0, 120));
+}
+
+TEST_F(SdpTest, parseFmtpQcif) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 qcif=20\r\n");
+ ASSERT_EQ(20, sdp_attr_get_fmtp_qcif(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpQcifWith0) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 qcif=0\r\n");
+ ASSERT_EQ(SDP_INVALID_VALUE, sdp_attr_get_fmtp_qcif(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpQcifWith33) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 qcif=33\r\n");
+ ASSERT_EQ(SDP_INVALID_VALUE, sdp_attr_get_fmtp_qcif(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpCif) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 cif=11\r\n");
+ ASSERT_EQ(11, sdp_attr_get_fmtp_cif(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpCifWith0) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 cif=0\r\n");
+ ASSERT_EQ(SDP_INVALID_VALUE, sdp_attr_get_fmtp_cif(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpCifWith33) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 cif=33\r\n");
+ ASSERT_EQ(SDP_INVALID_VALUE, sdp_attr_get_fmtp_cif(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpMaxbr) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 maxbr=21\r\n");
+ ASSERT_EQ(21, sdp_attr_get_fmtp_maxbr(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpMaxbrWith0) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 maxbr=0\r\n");
+ ASSERT_EQ(SDP_INVALID_VALUE, sdp_attr_get_fmtp_maxbr(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpMaxbrWith65536) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 maxbr=65536\r\n");
+ ASSERT_EQ(SDP_INVALID_VALUE, sdp_attr_get_fmtp_maxbr(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpSqcif) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 sqcif=6\r\n");
+ ASSERT_EQ(6, sdp_attr_get_fmtp_sqcif(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpSqcifWith0) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 sqcif=0\r\n");
+ ASSERT_EQ(SDP_INVALID_VALUE, sdp_attr_get_fmtp_sqcif(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpSqcifWith33) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 sqcif=33\r\n");
+ ASSERT_EQ(SDP_INVALID_VALUE, sdp_attr_get_fmtp_sqcif(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpCif4) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 cif4=11\r\n");
+ ASSERT_EQ(11, sdp_attr_get_fmtp_cif4(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpCif4With0) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 cif4=0\r\n");
+ ASSERT_EQ(SDP_INVALID_VALUE, sdp_attr_get_fmtp_cif4(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpCif4With33) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 cif4=33\r\n");
+ ASSERT_EQ(SDP_INVALID_VALUE, sdp_attr_get_fmtp_cif4(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpCif16) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 cif16=11\r\n");
+ ASSERT_EQ(11, sdp_attr_get_fmtp_cif16(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpCif16With0) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 cif16=0\r\n");
+ ASSERT_EQ(SDP_INVALID_VALUE, sdp_attr_get_fmtp_cif16(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpCif16With33) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 cif16=33\r\n");
+ ASSERT_EQ(SDP_INVALID_VALUE, sdp_attr_get_fmtp_cif16(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpBpp) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 bpp=7\r\n");
+ ASSERT_EQ(7, sdp_attr_get_fmtp_bpp(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpBppWith0) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 bpp=0\r\n");
+ ASSERT_EQ(SDP_INVALID_VALUE, sdp_attr_get_fmtp_bpp(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpBppWith65536) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 bpp=65536\r\n");
+ ASSERT_EQ(SDP_INVALID_VALUE, sdp_attr_get_fmtp_bpp(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpHrd) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 hrd=800\r\n");
+ ASSERT_EQ(800, sdp_attr_get_fmtp_hrd(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpHrdWith0) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 hrd=0\r\n");
+ ASSERT_EQ(SDP_INVALID_VALUE, sdp_attr_get_fmtp_hrd(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpHrdWith65536) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 hrd=65536\r\n");
+ ASSERT_EQ(SDP_INVALID_VALUE, sdp_attr_get_fmtp_hrd(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpProfile) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 profile=4\r\n");
+ ASSERT_EQ(4, sdp_attr_get_fmtp_profile(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpProfileWith11) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 profile=11\r\n");
+ ASSERT_EQ(SDP_INVALID_VALUE, sdp_attr_get_fmtp_profile(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpLevel) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 level=56\r\n");
+ ASSERT_EQ(56, sdp_attr_get_fmtp_level(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpLevelWith101) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 level=101\r\n");
+ ASSERT_EQ(SDP_INVALID_VALUE, sdp_attr_get_fmtp_level(sdp_ptr_, 1, 0, 1));
+}
+
+// fmtp parse tests for the status-returning getters (mostly H.264 parameters).
+// In-range values yield SDP_SUCCESS plus the parsed value via out-param;
+// out-of-range values yield SDP_INVALID_PARAMETER (out-param passed as null).
+TEST_F(SdpTest, parseFmtpPacketizationMode) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 packetization-mode=1\r\n");
+ uint16_t packetizationMode;
+ ASSERT_EQ(SDP_SUCCESS, sdp_attr_get_fmtp_pack_mode(sdp_ptr_, 1, 0, 1, &packetizationMode));
+ ASSERT_EQ(1, packetizationMode);
+}
+
+TEST_F(SdpTest, parseFmtpPacketizationModeWith3) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 packetization-mode=3\r\n");
+ ASSERT_EQ(SDP_INVALID_PARAMETER, sdp_attr_get_fmtp_pack_mode(sdp_ptr_, 1, 0, 1, nullptr));
+}
+
+TEST_F(SdpTest, parseFmtpInterleavingDepth) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 sprop-interleaving-depth=566\r\n");
+ uint16_t interleavingDepth;
+ ASSERT_EQ(SDP_SUCCESS, sdp_attr_get_fmtp_interleaving_depth(sdp_ptr_, 1, 0, 1, &interleavingDepth));
+ ASSERT_EQ(566, interleavingDepth);
+}
+
+TEST_F(SdpTest, parseFmtpInterleavingDepthWith0) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 sprop-interleaving-depth=0\r\n");
+ ASSERT_EQ(SDP_INVALID_PARAMETER, sdp_attr_get_fmtp_interleaving_depth(sdp_ptr_, 1, 0, 1, nullptr));
+}
+
+TEST_F(SdpTest, parseFmtpInterleavingDepthWith65536) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 sprop-interleaving-depth=65536\r\n");
+ ASSERT_EQ(SDP_INVALID_PARAMETER, sdp_attr_get_fmtp_interleaving_depth(sdp_ptr_, 1, 0, 1, nullptr));
+}
+
+TEST_F(SdpTest, parseFmtpDeintBuf) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 sprop-deint-buf-req=4294967295\r\n");
+ uint32_t deintBuf;
+ ASSERT_EQ(SDP_SUCCESS, sdp_attr_get_fmtp_deint_buf_req(sdp_ptr_, 1, 0, 1, &deintBuf));
+ ASSERT_EQ(4294967295, deintBuf);
+}
+
+TEST_F(SdpTest, parseFmtpDeintBufWith0) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 sprop-deint-buf-req=0\r\n");
+ uint32_t deintBuf;
+ ASSERT_EQ(SDP_SUCCESS, sdp_attr_get_fmtp_deint_buf_req(sdp_ptr_, 1, 0, 1, &deintBuf));
+ ASSERT_EQ(0U, deintBuf);
+}
+
+TEST_F(SdpTest, parseFmtpDeintBufWith4294967296) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 sprop-deint-buf-req=4294967296\r\n");
+ ASSERT_EQ(SDP_INVALID_PARAMETER, sdp_attr_get_fmtp_deint_buf_req(sdp_ptr_, 1, 0, 1, nullptr));
+}
+
+TEST_F(SdpTest, parseFmtpMaxDonDiff) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 sprop-max-don-diff=5678\r\n");
+ uint32_t maxDonDiff;
+ ASSERT_EQ(SDP_SUCCESS, sdp_attr_get_fmtp_max_don_diff(sdp_ptr_, 1, 0, 1, &maxDonDiff));
+ ASSERT_EQ(5678U, maxDonDiff);
+}
+
+TEST_F(SdpTest, parseFmtpMaxDonDiffWith0) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 sprop-max-don-diff=0\r\n");
+ ASSERT_EQ(SDP_INVALID_PARAMETER, sdp_attr_get_fmtp_max_don_diff(sdp_ptr_, 1, 0, 1, nullptr));
+}
+
+TEST_F(SdpTest, parseFmtpMaxDonDiffWith4294967296) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 sprop-max-don-diff=4294967296\r\n");
+ ASSERT_EQ(SDP_INVALID_PARAMETER, sdp_attr_get_fmtp_max_don_diff(sdp_ptr_, 1, 0, 1, nullptr));
+}
+
+TEST_F(SdpTest, parseFmtpInitBufTime) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 sprop-init-buf-time=4294967295\r\n");
+ uint32_t initBufTime;
+ ASSERT_EQ(SDP_SUCCESS, sdp_attr_get_fmtp_init_buf_time(sdp_ptr_, 1, 0, 1, &initBufTime));
+ ASSERT_EQ(4294967295, initBufTime);
+}
+
+TEST_F(SdpTest, parseFmtpInitBufTimeWith0) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 sprop-init-buf-time=0\r\n");
+ uint32_t initBufTime;
+ ASSERT_EQ(SDP_SUCCESS, sdp_attr_get_fmtp_init_buf_time(sdp_ptr_, 1, 0, 1, &initBufTime));
+ ASSERT_EQ(0U, initBufTime);
+}
+
+TEST_F(SdpTest, parseFmtpInitBufTimeWith4294967296) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 sprop-init-buf-time=4294967296\r\n");
+ ASSERT_EQ(SDP_INVALID_PARAMETER, sdp_attr_get_fmtp_init_buf_time(sdp_ptr_, 1, 0, 1, nullptr));
+}
+
+TEST_F(SdpTest, parseFmtpMaxMbps) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 max-mbps=46789\r\n");
+ // NOTE(review): local name "maxMpbs" is a transposition of "maxMbps";
+ // harmless, but worth fixing in a follow-up.
+ uint32_t maxMpbs;
+ ASSERT_EQ(SDP_SUCCESS, sdp_attr_get_fmtp_max_mbps(sdp_ptr_, 1, 0, 1, &maxMpbs));
+ ASSERT_EQ(46789U, maxMpbs);
+}
+
+TEST_F(SdpTest, parseFmtpMaxMbpsWith0) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 max-mbps=0\r\n");
+ ASSERT_EQ(SDP_INVALID_PARAMETER, sdp_attr_get_fmtp_max_mbps(sdp_ptr_, 1, 0, 1, nullptr));
+}
+
+TEST_F(SdpTest, parseFmtpMaxMbpsWith4294967296) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 max-mbps=4294967296\r\n");
+ ASSERT_EQ(SDP_INVALID_PARAMETER, sdp_attr_get_fmtp_max_mbps(sdp_ptr_, 1, 0, 1, nullptr));
+}
+
+TEST_F(SdpTest, parseFmtpMaxCpb) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 max-cpb=47891\r\n");
+ uint32_t maxCpb;
+ ASSERT_EQ(SDP_SUCCESS, sdp_attr_get_fmtp_max_cpb(sdp_ptr_, 1, 0, 1, &maxCpb));
+ ASSERT_EQ(47891U, maxCpb);
+}
+
+TEST_F(SdpTest, parseFmtpMaxCpbWith0) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 max-cpb=0\r\n");
+ ASSERT_EQ(SDP_INVALID_PARAMETER, sdp_attr_get_fmtp_max_cpb(sdp_ptr_, 1, 0, 1, nullptr));
+}
+
+TEST_F(SdpTest, parseFmtpMaxCpbWith4294967296) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 max-cpb=4294967296\r\n");
+ ASSERT_EQ(SDP_INVALID_PARAMETER, sdp_attr_get_fmtp_max_cpb(sdp_ptr_, 1, 0, 1, nullptr));
+}
+
+TEST_F(SdpTest, parseFmtpMaxDpb) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 max-dpb=47892\r\n");
+ uint32_t maxDpb;
+ ASSERT_EQ(SDP_SUCCESS, sdp_attr_get_fmtp_max_dpb(sdp_ptr_, 1, 0, 1, &maxDpb));
+ ASSERT_EQ(47892U, maxDpb);
+}
+
+TEST_F(SdpTest, parseFmtpMaxDpbWith0) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 max-dpb=0\r\n");
+ ASSERT_EQ(SDP_INVALID_PARAMETER, sdp_attr_get_fmtp_max_dpb(sdp_ptr_, 1, 0, 1, nullptr));
+}
+
+TEST_F(SdpTest, parseFmtpMaxDpbWith4294967296) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 max-dpb=4294967296\r\n");
+ ASSERT_EQ(SDP_INVALID_PARAMETER, sdp_attr_get_fmtp_max_dpb(sdp_ptr_, 1, 0, 1, nullptr));
+}
+
+TEST_F(SdpTest, parseFmtpMaxBr) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 max-br=47893\r\n");
+ uint32_t maxBr;
+ ASSERT_EQ(SDP_SUCCESS, sdp_attr_get_fmtp_max_br(sdp_ptr_, 1, 0, 1, &maxBr));
+ ASSERT_EQ(47893U, maxBr);
+}
+
+TEST_F(SdpTest, parseFmtpMaxBrWith0) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 max-br=0\r\n");
+ ASSERT_EQ(SDP_INVALID_PARAMETER, sdp_attr_get_fmtp_max_br(sdp_ptr_, 1, 0, 1, nullptr));
+}
+
+TEST_F(SdpTest, parseFmtpMaxBrWith4294967296) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 max-br=4294967296\r\n");
+ ASSERT_EQ(SDP_INVALID_PARAMETER, sdp_attr_get_fmtp_max_br(sdp_ptr_, 1, 0, 1, nullptr));
+}
+
+TEST_F(SdpTest, parseFmtpRedundantPicCap) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 redundant-pic-cap=1\r\n");
+ ASSERT_EQ(1, sdp_attr_fmtp_is_redundant_pic_cap(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpRedundantPicCapWith0) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 redundant-pic-cap=0\r\n");
+ ASSERT_EQ(0, sdp_attr_fmtp_is_redundant_pic_cap(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpRedundantPicCapWith2) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 redundant-pic-cap=2\r\n");
+ ASSERT_EQ(0, sdp_attr_fmtp_is_redundant_pic_cap(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpDeintBufCap) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 deint-buf-cap=4294967295\r\n");
+ uint32_t deintBufCap;
+ ASSERT_EQ(SDP_SUCCESS, sdp_attr_get_fmtp_deint_buf_cap(sdp_ptr_, 1, 0, 1, &deintBufCap));
+ ASSERT_EQ(4294967295, deintBufCap);
+}
+
+TEST_F(SdpTest, parseFmtpDeintBufCapWith0) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 deint-buf-cap=0\r\n");
+ uint32_t deintBufCap;
+ ASSERT_EQ(SDP_SUCCESS, sdp_attr_get_fmtp_deint_buf_cap(sdp_ptr_, 1, 0, 1, &deintBufCap));
+ ASSERT_EQ(0U, deintBufCap);
+}
+
+TEST_F(SdpTest, parseFmtpDeintBufCapWith4294967296) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 deint-buf-cap=4294967296\r\n");
+ ASSERT_EQ(SDP_INVALID_PARAMETER, sdp_attr_get_fmtp_deint_buf_cap(sdp_ptr_, 1, 0, 1, nullptr));
+}
+
+TEST_F(SdpTest, parseFmtpMaxRcmdNaluSize) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 max-rcmd-nalu-size=4294967295\r\n");
+ uint32_t maxRcmdNaluSize;
+ ASSERT_EQ(SDP_SUCCESS, sdp_attr_get_fmtp_max_rcmd_nalu_size(sdp_ptr_, 1, 0, 1, &maxRcmdNaluSize));
+ ASSERT_EQ(4294967295, maxRcmdNaluSize);
+}
+
+TEST_F(SdpTest, parseFmtpMaxRcmdNaluSizeWith0) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 max-rcmd-nalu-size=0\r\n");
+ uint32_t maxRcmdNaluSize;
+ ASSERT_EQ(SDP_SUCCESS, sdp_attr_get_fmtp_max_rcmd_nalu_size(sdp_ptr_, 1, 0, 1, &maxRcmdNaluSize));
+ ASSERT_EQ(0U, maxRcmdNaluSize);
+}
+
+TEST_F(SdpTest, parseFmtpMaxRcmdNaluSizeWith4294967296) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 max-rcmd-nalu-size=4294967296\r\n");
+ ASSERT_EQ(SDP_INVALID_PARAMETER, sdp_attr_get_fmtp_max_rcmd_nalu_size(sdp_ptr_, 1, 0, 1, nullptr));
+}
+
+// fmtp parse tests for parameter-add and the H.263 annex parameters (K, N, P).
+TEST_F(SdpTest, parseFmtpParameterAdd) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 parameter-add=1\r\n");
+ ASSERT_EQ(1, sdp_attr_fmtp_is_parameter_add(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpParameterAddWith0) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 parameter-add=0\r\n");
+ ASSERT_EQ(0, sdp_attr_fmtp_is_parameter_add(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpParameterAddWith2) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 parameter-add=2\r\n");
+ ASSERT_EQ(0, sdp_attr_fmtp_is_parameter_add(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpAnnexK) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 K=566\r\n");
+ ASSERT_EQ(566, sdp_attr_get_fmtp_annex_k_val(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpAnnexKWith0) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 K=0\r\n");
+ ASSERT_EQ(SDP_INVALID_VALUE, sdp_attr_get_fmtp_annex_k_val(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpAnnexKWith65536) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 K=65536\r\n");
+ ASSERT_EQ(SDP_INVALID_VALUE, sdp_attr_get_fmtp_annex_k_val(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpAnnexN) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 N=4567\r\n");
+ ASSERT_EQ(4567, sdp_attr_get_fmtp_annex_n_val(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpAnnexNWith0) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 N=0\r\n");
+ ASSERT_EQ(SDP_INVALID_VALUE, sdp_attr_get_fmtp_annex_n_val(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpAnnexNWith65536) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 N=65536\r\n");
+ ASSERT_EQ(SDP_INVALID_VALUE, sdp_attr_get_fmtp_annex_n_val(sdp_ptr_, 1, 0, 1));
+}
+
+// Annex P carries two comma-separated values: picture-resize and warp.
+TEST_F(SdpTest, parseFmtpAnnexP) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 P=5678,2\r\n");
+ ASSERT_EQ(5678, sdp_attr_get_fmtp_annex_p_picture_resize(sdp_ptr_, 1, 0, 1));
+ ASSERT_EQ(2, sdp_attr_get_fmtp_annex_p_warp(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpAnnexPWithResize0) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 P=0,3\r\n");
+ ASSERT_EQ(0, sdp_attr_get_fmtp_annex_p_picture_resize(sdp_ptr_, 1, 0, 1));
+ ASSERT_EQ(3, sdp_attr_get_fmtp_annex_p_warp(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpAnnexPWithResize65536) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 P=65536,4\r\n");
+ ASSERT_EQ(0, sdp_attr_get_fmtp_annex_p_picture_resize(sdp_ptr_, 1, 0, 1));
+ // if the first fails, the second will too. Both default to 0 on failure.
+ ASSERT_EQ(0, sdp_attr_get_fmtp_annex_p_warp(sdp_ptr_, 1, 0, 1));
+}
+
+TEST_F(SdpTest, parseFmtpAnnexPWithWarp65536) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 P=346,65536\r\n");
+ ASSERT_EQ(346, sdp_attr_get_fmtp_annex_p_picture_resize(sdp_ptr_, 1, 0, 1));
+ ASSERT_EQ(0, sdp_attr_get_fmtp_annex_p_warp(sdp_ptr_, 1, 0, 1));
+}
+
+// fmtp parse tests for level-asymmetry-allowed and the Opus-style audio
+// parameters (maxaveragebitrate, usedtx, stereo, useinbandfec,
+// maxcodedaudiobandwidth, cbr, maxplaybackrate).
+TEST_F(SdpTest, parseFmtpLevelAsymmetryAllowed) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 level-asymmetry-allowed=1\r\n");
+
+ uint16_t levelAsymmetryAllowed;
+ ASSERT_EQ(SDP_SUCCESS, sdp_attr_get_fmtp_level_asymmetry_allowed(sdp_ptr_, 1, 0, 1, &levelAsymmetryAllowed));
+ ASSERT_EQ(1U, levelAsymmetryAllowed);
+}
+
+TEST_F(SdpTest, parseFmtpLevelAsymmetryAllowedWith0) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 level-asymmetry-allowed=0\r\n");
+ uint16_t levelAsymmetryAllowed;
+ ASSERT_EQ(SDP_SUCCESS, sdp_attr_get_fmtp_level_asymmetry_allowed(sdp_ptr_, 1, 0, 1, &levelAsymmetryAllowed));
+ ASSERT_EQ(0U, levelAsymmetryAllowed);
+}
+
+TEST_F(SdpTest, parseFmtpLevelAsymmetryAllowedWith2) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 level-asymmetry-allowed=2\r\n");
+ ASSERT_EQ(SDP_INVALID_PARAMETER, sdp_attr_get_fmtp_level_asymmetry_allowed(sdp_ptr_, 1, 0, 1, nullptr));
+}
+
+TEST_F(SdpTest, parseFmtpMaxAverageBitrate) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 maxaveragebitrate=47893\r\n");
+ uint32_t maxAverageBitrate;
+ ASSERT_EQ(SDP_SUCCESS, sdp_attr_get_fmtp_max_average_bitrate(sdp_ptr_, 1, 0, 1, &maxAverageBitrate));
+ ASSERT_EQ(47893U, maxAverageBitrate);
+}
+
+TEST_F(SdpTest, parseFmtpMaxAverageBitrateWith0) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 maxaveragebitrate=0\r\n");
+ ASSERT_EQ(SDP_INVALID_PARAMETER, sdp_attr_get_fmtp_max_average_bitrate(sdp_ptr_, 1, 0, 1, nullptr));
+}
+
+TEST_F(SdpTest, parseFmtpMaxAverageBitrateWith4294967296) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 maxaveragebitrate=4294967296\r\n");
+ ASSERT_EQ(SDP_INVALID_PARAMETER, sdp_attr_get_fmtp_max_average_bitrate(sdp_ptr_, 1, 0, 1, nullptr));
+}
+
+TEST_F(SdpTest, parseFmtpUsedTx) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 usedtx=1\r\n");
+ tinybool usedTx;
+ ASSERT_EQ(SDP_SUCCESS, sdp_attr_get_fmtp_usedtx(sdp_ptr_, 1, 0, 1, &usedTx));
+ ASSERT_EQ(1, usedTx);
+}
+
+TEST_F(SdpTest, parseFmtpUsedTxWith0) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 usedtx=0\r\n");
+ tinybool usedTx;
+ ASSERT_EQ(SDP_SUCCESS, sdp_attr_get_fmtp_usedtx(sdp_ptr_, 1, 0, 1, &usedTx));
+ ASSERT_EQ(0, usedTx);
+}
+
+TEST_F(SdpTest, parseFmtpUsedTxWith2) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 usedtx=2\r\n");
+ ASSERT_EQ(SDP_INVALID_PARAMETER, sdp_attr_get_fmtp_usedtx(sdp_ptr_, 1, 0, 1, nullptr));
+}
+
+TEST_F(SdpTest, parseFmtpStereo) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 stereo=1\r\n");
+ tinybool stereo;
+ ASSERT_EQ(SDP_SUCCESS, sdp_attr_get_fmtp_stereo(sdp_ptr_, 1, 0, 1, &stereo));
+ ASSERT_EQ(1, stereo);
+}
+
+TEST_F(SdpTest, parseFmtpStereoWith0) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 stereo=0\r\n");
+ tinybool stereo;
+ ASSERT_EQ(SDP_SUCCESS, sdp_attr_get_fmtp_stereo(sdp_ptr_, 1, 0, 1, &stereo));
+ ASSERT_EQ(0, stereo);
+}
+
+TEST_F(SdpTest, parseFmtpStereoWith2) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 stereo=2\r\n");
+ ASSERT_EQ(SDP_INVALID_PARAMETER, sdp_attr_get_fmtp_stereo(sdp_ptr_, 1, 0, 1, nullptr));
+}
+
+TEST_F(SdpTest, parseFmtpUseInBandFec) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 useinbandfec=1\r\n");
+ tinybool useInbandFec;
+ ASSERT_EQ(SDP_SUCCESS, sdp_attr_get_fmtp_useinbandfec(sdp_ptr_, 1, 0, 1, &useInbandFec));
+ ASSERT_EQ(1, useInbandFec);
+}
+
+TEST_F(SdpTest, parseFmtpUseInBandWith0) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 useinbandfec=0\r\n");
+ tinybool useInbandFec;
+ ASSERT_EQ(SDP_SUCCESS, sdp_attr_get_fmtp_useinbandfec(sdp_ptr_, 1, 0, 1, &useInbandFec));
+ ASSERT_EQ(0, useInbandFec);
+}
+
+TEST_F(SdpTest, parseFmtpUseInBandWith2) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 useinbandfec=2\r\n");
+ ASSERT_EQ(SDP_INVALID_PARAMETER, sdp_attr_get_fmtp_useinbandfec(sdp_ptr_, 1, 0, 1, nullptr));
+}
+
+TEST_F(SdpTest, parseFmtpMaxCodedAudioBandwidth) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 maxcodedaudiobandwidth=abcdefg\r\n");
+ char* maxCodedAudioBandwith = sdp_attr_get_fmtp_maxcodedaudiobandwidth(sdp_ptr_, 1, 0, 1);
+ ASSERT_EQ(0, strcmp("abcdefg", maxCodedAudioBandwith));
+}
+
+// An empty value yields an empty string (first byte is NUL), not a failure.
+TEST_F(SdpTest, parseFmtpMaxCodedAudioBandwidthBad) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 maxcodedaudiobandwidth=\r\n");
+ char* maxCodedAudioBandwith = sdp_attr_get_fmtp_maxcodedaudiobandwidth(sdp_ptr_, 1, 0, 1);
+ ASSERT_EQ(0, *maxCodedAudioBandwith);
+}
+
+TEST_F(SdpTest, parseFmtpCbr) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 cbr=1\r\n");
+ tinybool cbr;
+ ASSERT_EQ(SDP_SUCCESS, sdp_attr_get_fmtp_cbr(sdp_ptr_, 1, 0, 1, &cbr));
+ ASSERT_EQ(1, cbr);
+}
+
+TEST_F(SdpTest, parseFmtpCbrWith0) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 cbr=0\r\n");
+ tinybool cbr;
+ ASSERT_EQ(SDP_SUCCESS, sdp_attr_get_fmtp_cbr(sdp_ptr_, 1, 0, 1, &cbr));
+ ASSERT_EQ(0, cbr);
+}
+
+TEST_F(SdpTest, parseFmtpCbrWith2) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 cbr=2\r\n");
+ ASSERT_EQ(SDP_INVALID_PARAMETER, sdp_attr_get_fmtp_cbr(sdp_ptr_, 1, 0, 1, nullptr));
+}
+
+// maxplaybackrate has no dedicated getter here; the attribute struct is
+// inspected directly. An out-of-range value drops the whole fmtp attribute.
+// NOTE(review): ASSERT_NE/ASSERT_EQ against NULL relies on gtest's special
+// null-literal handling; nullptr would be the modern spelling.
+TEST_F(SdpTest, parseFmtpMaxPlaybackRate) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 maxplaybackrate=47900\r\n");
+ sdp_attr_t *attr_p = sdp_find_attr(sdp_ptr_, 1, 0, SDP_ATTR_FMTP, 1);
+ ASSERT_NE(NULL, attr_p);
+ ASSERT_EQ(47900U, attr_p->attr.fmtp.maxplaybackrate);
+}
+
+TEST_F(SdpTest, parseFmtpMaxPlaybackRateWith0) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 maxplaybackrate=0\r\n");
+ sdp_attr_t *attr_p = sdp_find_attr(sdp_ptr_, 1, 0, SDP_ATTR_FMTP, 1);
+ ASSERT_EQ(NULL, attr_p);
+}
+
+TEST_F(SdpTest, parseFmtpMaxPlaybackRateWith4294967296) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 maxplaybackrate=4294967296\r\n");
+ sdp_attr_t *attr_p = sdp_find_attr(sdp_ptr_, 1, 0, SDP_ATTR_FMTP, 1);
+ ASSERT_EQ(NULL, attr_p);
+}
+
+// Round-trip tests for the VP8 fmtp parameters max-fs and max-fr: parse
+// (valid, zero, and overflow inputs) and serialize (individually and combined).
+TEST_F(SdpTest, parseFmtpMaxFs) {
+ uint32_t val = 0;
+ ParseSdp(kVideoSdp + "a=fmtp:120 max-fs=300;max-fr=30\r\n");
+ ASSERT_EQ(sdp_attr_get_fmtp_max_fs(sdp_ptr_, 1, 0, 1, &val), SDP_SUCCESS);
+ ASSERT_EQ(val, 300U);
+}
+TEST_F(SdpTest, parseFmtpMaxFsWith0) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 max-fs=0\r\n");
+ ASSERT_EQ(SDP_INVALID_PARAMETER, sdp_attr_get_fmtp_max_fs(sdp_ptr_, 1, 0, 1, nullptr));
+}
+
+TEST_F(SdpTest, parseFmtpMaxFsWith4294967296) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 max-fs=4294967296\r\n");
+ ASSERT_EQ(SDP_INVALID_PARAMETER, sdp_attr_get_fmtp_max_fs(sdp_ptr_, 1, 0, 1, nullptr));
+}
+
+TEST_F(SdpTest, parseFmtpMaxFr) {
+ uint32_t val = 0;
+ ParseSdp(kVideoSdp + "a=fmtp:120 max-fs=300;max-fr=30\r\n");
+ ASSERT_EQ(sdp_attr_get_fmtp_max_fr(sdp_ptr_, 1, 0, 1, &val), SDP_SUCCESS);
+ ASSERT_EQ(val, 30U);
+}
+
+TEST_F(SdpTest, parseFmtpMaxFrWith0) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 max-fr=0\r\n");
+ ASSERT_EQ(SDP_INVALID_PARAMETER, sdp_attr_get_fmtp_max_fr(sdp_ptr_, 1, 0, 1, nullptr));
+}
+
+TEST_F(SdpTest, parseFmtpMaxFrWith4294967296) {
+ ParseSdp(kVideoSdp + "a=fmtp:120 max-fr=4294967296\r\n");
+ ASSERT_EQ(SDP_INVALID_PARAMETER, sdp_attr_get_fmtp_max_fr(sdp_ptr_, 1, 0, 1, nullptr));
+}
+
+TEST_F(SdpTest, addFmtpMaxFs) {
+ InitLocalSdp();
+ int level = AddNewMedia(SDP_MEDIA_VIDEO);
+ AddNewFmtpMaxFs(level, 300);
+ std::string body = SerializeSdp();
+ ASSERT_NE(body.find("a=fmtp:120 max-fs=300\r\n"), std::string::npos);
+}
+
+TEST_F(SdpTest, addFmtpMaxFr) {
+ InitLocalSdp();
+ int level = AddNewMedia(SDP_MEDIA_VIDEO);
+ AddNewFmtpMaxFr(level, 30);
+ std::string body = SerializeSdp();
+ ASSERT_NE(body.find("a=fmtp:120 max-fr=30\r\n"), std::string::npos);
+}
+
+TEST_F(SdpTest, addFmtpMaxFsFr) {
+ InitLocalSdp();
+ int level = AddNewMedia(SDP_MEDIA_VIDEO);
+ AddNewFmtpMaxFsFr(level, 300, 30);
+ std::string body = SerializeSdp();
+ ASSERT_NE(body.find("a=fmtp:120 max-fs=300;max-fr=30\r\n"),
+ std::string::npos);
+}
+
+// SDP body with an embedded NUL inside the fmtp line (regression fixture for
+// bz://1089207). NOTE(review): std::string construction from a const char*
+// stops at the first '\0', so this object only holds the bytes up to "max" —
+// parseBrokenFmtp passes an explicit length to reach past it; confirm that
+// length stays within the string's allocation.
+static const std::string kBrokenFmtp =
+ "v=0\r\n"
+ "o=- 4294967296 2 IN IP4 127.0.0.1\r\n"
+ "s=SIP Call\r\n"
+ "t=0 0\r\n"
+ "m=video 56436 RTP/SAVPF 120\r\n"
+ "c=IN IP4 198.51.100.7\r\n"
+ "a=rtpmap:120 VP8/90000\r\n"
+ /* Note: the \0 in this string triggered bz://1089207
+ */
+ "a=fmtp:120 max-fs=300;max\0fr=30";
+
+// Regression test for bz://1089207: a NUL byte inside the fmtp value must not
+// crash the parser, and the attribute value must not be extractable.
+TEST_F(SdpTest, parseBrokenFmtp) {
+ uint32_t val = 0;
+ const char *buf = kBrokenFmtp.data();
+ ResetSdp();
+ /* We need to manually invoke the parser here to be able to specify the length
+ * of the string beyond the \0 in last line of the string.
+ */
+ // NOTE(review): kBrokenFmtp's std::string contents end at the embedded '\0'
+ // (160 bytes by construction), so the magic length 165 reads past .size();
+ // verify this against the original fixture's intent.
+ ASSERT_EQ(sdp_parse(sdp_ptr_, buf, 165), SDP_SUCCESS);
+ ASSERT_EQ(sdp_attr_get_fmtp_max_fs(sdp_ptr_, 1, 0, 1, &val), SDP_INVALID_PARAMETER);
+}
+
+// Round-trip tests for the session-level a=ice-lite attribute.
+TEST_F(SdpTest, addIceLite) {
+ InitLocalSdp();
+ uint16_t inst_num = 0;
+ EXPECT_EQ(sdp_add_new_attr(sdp_ptr_, SDP_SESSION_LEVEL, 0,
+ SDP_ATTR_ICE_LITE, &inst_num), SDP_SUCCESS);
+ std::string body = SerializeSdp();
+ ASSERT_NE(body.find("a=ice-lite\r\n"), std::string::npos);
+}
+
+TEST_F(SdpTest, parseIceLite) {
+ std::string sdp =
+ "v=0\r\n"
+ "o=- 4294967296 2 IN IP4 127.0.0.1\r\n"
+ "s=SIP Call\r\n"
+ "t=0 0\r\n"
+ "a=ice-lite\r\n";
+ ParseSdp(sdp);
+ ASSERT_TRUE(sdp_attr_is_present(sdp_ptr_, SDP_ATTR_ICE_LITE,
+ SDP_SESSION_LEVEL, 0));
+}
+
// Test fixture for the C++ SDP parser. The bool test parameter selects
// whether each SDP is additionally serialized and re-parsed before the
// assertions run, so both the parser and the serializer get coverage.
class NewSdpTest : public ::testing::Test,
                   public ::testing::WithParamInterface<bool> {
  public:
    NewSdpTest() {}

    // Parse |sdp| into mSdp. When GetParam() is true, the result is
    // serialized, re-parsed, and serialized again; the two serializations
    // must match. When |expectSuccess| is true, asserts that parsing
    // produced an Sdp object and recorded no errors/warnings.
    void ParseSdp(const std::string &sdp, bool expectSuccess = true) {
      mSdp = mozilla::Move(mParser.Parse(sdp));

      // Are we configured to do a parse and serialize before actually
      // running the test?
      if (GetParam()) {
        std::stringstream os;

        if (expectSuccess) {
          ASSERT_TRUE(!!mSdp) << "Parse failed on first pass: "
                              << GetParseErrors();
        }

        if (mSdp) {
          // Serialize and re-parse
          mSdp->Serialize(os);
          mSdp = mozilla::Move(mParser.Parse(os.str()));

          // Whether we expected the parse to work or not, it should
          // succeed the second time if it succeeded the first.
          ASSERT_TRUE(!!mSdp) << "Parse failed on second pass, SDP was: "
                              << std::endl << os.str() << std::endl
                              << "Errors were: " << GetParseErrors();

          // Serialize again and compare
          std::stringstream os2;
          mSdp->Serialize(os2);
          ASSERT_EQ(os.str(), os2.str());
        }
      }

      if (expectSuccess) {
        ASSERT_TRUE(!!mSdp) << "Parse failed: " << GetParseErrors();
        ASSERT_EQ(0U, mParser.GetParseErrors().size())
            << "Got unexpected parse errors/warnings: "
            << GetParseErrors();
      }
    }

    // For streaming parse errors
    // Flattens the parser's (line, message) error list into one string.
    std::string GetParseErrors() const {
      std::stringstream output;
      for (auto e = mParser.GetParseErrors().begin();
           e != mParser.GetParseErrors().end();
           ++e) {
        output << e->first << ": " << e->second << std::endl;
      }
      return output.str();
    }

    // Asserts that |rtpmaps| has an entry for |search_pt| whose payload
    // type, codec enum, name, clock rate and channel count all match.
    void CheckRtpmap(const std::string& expected_pt,
                     SdpRtpmapAttributeList::CodecType codec,
                     const std::string& name,
                     uint32_t clock,
                     uint16_t channels,
                     const std::string& search_pt,
                     const SdpRtpmapAttributeList& rtpmaps) const {
      ASSERT_TRUE(rtpmaps.HasEntry(search_pt));
      auto attr = rtpmaps.GetEntry(search_pt);
      ASSERT_EQ(expected_pt, attr.pt);
      ASSERT_EQ(codec, attr.codec);
      ASSERT_EQ(name, attr.name);
      ASSERT_EQ(clock, attr.clock);
      ASSERT_EQ(channels, attr.channels);
    }

    // Asserts that |sctpmaps| has an entry for |search_pt| with the
    // expected protocol name and stream count.
    void CheckSctpmap(const std::string& expected_pt,
                      const std::string& name,
                      uint16_t streams,
                      const std::string& search_pt,
                      const SdpSctpmapAttributeList& sctpmaps) const {
      ASSERT_TRUE(sctpmaps.HasEntry(search_pt));
      auto attr = sctpmaps.GetEntry(search_pt);
      ASSERT_EQ(expected_pt, search_pt);
      ASSERT_EQ(expected_pt, attr.pt);
      ASSERT_EQ(name, attr.name);
      ASSERT_EQ(streams, attr.streams);
    }

    // Asserts that a single rtcp-fb entry matches the expected fields.
    void CheckRtcpFb(const SdpRtcpFbAttributeList::Feedback& feedback,
                     const std::string& pt,
                     SdpRtcpFbAttributeList::Type type,
                     const std::string& first_parameter,
                     const std::string& extra = "") const {
      ASSERT_EQ(pt, feedback.pt);
      ASSERT_EQ(type, feedback.type);
      ASSERT_EQ(first_parameter, feedback.parameter);
      ASSERT_EQ(extra, feedback.extra);
    }

    // Asserts that the first m-section's second fmtp entry is a
    // telephone-event fmtp for payload 101 whose tone list equals
    // |expectedDtmfTones|. Assumes exactly two fmtps with 101 second —
    // true for the kAudioWithTelephoneEvent SDP these tests use.
    void CheckDtmfFmtp(const std::string& expectedDtmfTones) const {
      ASSERT_TRUE(mSdp->GetMediaSection(0).GetAttributeList().HasAttribute(
            SdpAttribute::kFmtpAttribute));
      auto audio_format_params =
          mSdp->GetMediaSection(0).GetAttributeList().GetFmtp().mFmtps;
      ASSERT_EQ(2U, audio_format_params.size());

      ASSERT_EQ("101", audio_format_params[1].format);
      ASSERT_TRUE(!!audio_format_params[1].parameters);
      const SdpFmtpAttributeList::TelephoneEventParameters* te_parameters =
        static_cast<SdpFmtpAttributeList::TelephoneEventParameters*>(
            audio_format_params[1].parameters.get());
      ASSERT_NE(0U, te_parameters->dtmfTones.size());
      ASSERT_EQ(expectedDtmfTones, te_parameters->dtmfTones);
    }

    // Asserts that |attr| serializes to exactly |expected|.
    void CheckSerialize(const std::string& expected,
                        const SdpAttribute& attr) const {
      std::stringstream str;
      attr.Serialize(str);
      ASSERT_EQ(expected, str.str());
    }

    SipccSdpParser mParser;        // parser under test
    mozilla::UniquePtr<Sdp> mSdp;  // result of the most recent ParseSdp()
}; // class NewSdpTest
+
// Ensures the fixture itself constructs and tears down cleanly.
TEST_P(NewSdpTest, CreateDestroy) {
}
+
+TEST_P(NewSdpTest, ParseEmpty) {
+ ParseSdp("", false);
+ ASSERT_FALSE(mSdp);
+ ASSERT_NE(0U, mParser.GetParseErrors().size())
+ << "Expected at least one parse error.";
+}
+
// Deliberately malformed input used by the garbage-parse tests below.
const std::string kBadSdp = "This is SDPARTA!!!!";
+
+TEST_P(NewSdpTest, ParseGarbage) {
+ ParseSdp(kBadSdp, false);
+ ASSERT_FALSE(mSdp);
+ ASSERT_NE(0U, mParser.GetParseErrors().size())
+ << "Expected at least one parse error.";
+}
+
+TEST_P(NewSdpTest, ParseGarbageTwice) {
+ ParseSdp(kBadSdp, false);
+ ASSERT_FALSE(mSdp);
+ size_t errorCount = mParser.GetParseErrors().size();
+ ASSERT_NE(0U, errorCount)
+ << "Expected at least one parse error.";
+ ParseSdp(kBadSdp, false);
+ ASSERT_FALSE(mSdp);
+ ASSERT_EQ(errorCount, mParser.GetParseErrors().size())
+ << "Expected same error count for same SDP.";
+}
+
+TEST_P(NewSdpTest, ParseMinimal) {
+ ParseSdp(kVideoSdp);
+ ASSERT_EQ(0U, mParser.GetParseErrors().size()) <<
+ "Got parse errors: " << GetParseErrors();
+}
+
+TEST_P(NewSdpTest, CheckOriginGetUsername) {
+ ParseSdp(kVideoSdp);
+ ASSERT_EQ("-", mSdp->GetOrigin().GetUsername())
+ << "Wrong username in origin";
+}
+
+TEST_P(NewSdpTest, CheckOriginGetSessionId) {
+ ParseSdp(kVideoSdp);
+ ASSERT_EQ(4294967296U, mSdp->GetOrigin().GetSessionId())
+ << "Wrong session id in origin";
+}
+
+TEST_P(NewSdpTest, CheckOriginGetSessionVersion) {
+ ParseSdp(kVideoSdp);
+ ASSERT_EQ(2U , mSdp->GetOrigin().GetSessionVersion())
+ << "Wrong version in origin";
+}
+
+TEST_P(NewSdpTest, CheckOriginGetAddrType) {
+ ParseSdp(kVideoSdp);
+ ASSERT_EQ(sdp::kIPv4, mSdp->GetOrigin().GetAddrType())
+ << "Wrong address type in origin";
+}
+
+TEST_P(NewSdpTest, CheckOriginGetAddress) {
+ ParseSdp(kVideoSdp);
+ ASSERT_EQ("127.0.0.1" , mSdp->GetOrigin().GetAddress())
+ << "Wrong address in origin";
+}
+
+TEST_P(NewSdpTest, CheckGetMissingBandwidth) {
+ ParseSdp(kVideoSdp);
+ ASSERT_EQ(0U, mSdp->GetBandwidth("CT"))
+ << "Wrong bandwidth in session";
+}
+
+TEST_P(NewSdpTest, CheckGetBandwidth) {
+ ParseSdp("v=0" CRLF
+ "o=- 4294967296 2 IN IP4 127.0.0.1" CRLF
+ "s=SIP Call" CRLF
+ "c=IN IP4 198.51.100.7" CRLF
+ "b=CT:5000" CRLF
+ "b=FOOBAR:10" CRLF
+ "b=AS:4" CRLF
+ "t=0 0" CRLF
+ "m=video 56436 RTP/SAVPF 120" CRLF
+ "a=rtpmap:120 VP8/90000" CRLF
+ );
+ ASSERT_EQ(5000U, mSdp->GetBandwidth("CT"))
+ << "Wrong CT bandwidth in session";
+ ASSERT_EQ(0U, mSdp->GetBandwidth("FOOBAR"))
+ << "Wrong FOOBAR bandwidth in session";
+ ASSERT_EQ(4U, mSdp->GetBandwidth("AS"))
+ << "Wrong AS bandwidth in session";
+}
+
+TEST_P(NewSdpTest, CheckGetMediaSectionsCount) {
+ ParseSdp(kVideoSdp);
+ ASSERT_EQ(1U, mSdp->GetMediaSectionCount())
+ << "Wrong number of media sections";
+}
+
+TEST_P(NewSdpTest, CheckMediaSectionGetMediaType) {
+ ParseSdp(kVideoSdp);
+ ASSERT_EQ(SdpMediaSection::kVideo, mSdp->GetMediaSection(0).GetMediaType())
+ << "Wrong type for first media section";
+}
+
+TEST_P(NewSdpTest, CheckMediaSectionGetProtocol) {
+ ParseSdp(kVideoSdp);
+ ASSERT_EQ(SdpMediaSection::kRtpSavpf, mSdp->GetMediaSection(0).GetProtocol())
+ << "Wrong protocol for video";
+}
+
+TEST_P(NewSdpTest, CheckMediaSectionGetFormats) {
+ ParseSdp(kVideoSdp);
+ auto video_formats = mSdp->GetMediaSection(0).GetFormats();
+ ASSERT_EQ(1U, video_formats.size()) << "Wrong number of formats for video";
+ ASSERT_EQ("120", video_formats[0]);
+}
+
+TEST_P(NewSdpTest, CheckMediaSectionGetPort) {
+ ParseSdp(kVideoSdp);
+ ASSERT_EQ(56436U, mSdp->GetMediaSection(0).GetPort())
+ << "Wrong port number in media section";
+}
+
+TEST_P(NewSdpTest, CheckMediaSectionGetMissingPortCount) {
+ ParseSdp(kVideoSdp);
+ ASSERT_EQ(0U, mSdp->GetMediaSection(0).GetPortCount())
+ << "Wrong port count in media section";
+}
+
+TEST_P(NewSdpTest, CheckMediaSectionGetPortCount) {
+ ParseSdp(kVideoSdp +
+ "m=audio 12345/2 RTP/SAVPF 0" CRLF
+ "a=rtpmap:0 PCMU/8000" CRLF
+ );
+ ASSERT_EQ(2U, mSdp->GetMediaSectionCount())
+ << "Wrong number of media sections";
+ ASSERT_EQ(2U, mSdp->GetMediaSection(1).GetPortCount())
+ << "Wrong port count in media section";
+}
+
+TEST_P(NewSdpTest, CheckMediaSectionGetMissingBandwidth) {
+ ParseSdp(kVideoSdp);
+ ASSERT_EQ(0U, mSdp->GetMediaSection(0).GetBandwidth("CT"))
+ << "Wrong bandwidth in media section";
+}
+
+TEST_P(NewSdpTest, CheckMediaSectionGetBandwidth) {
+ ParseSdp("v=0\r\n"
+ "o=- 4294967296 2 IN IP4 127.0.0.1\r\n"
+ "c=IN IP4 198.51.100.7\r\n"
+ "t=0 0\r\n"
+ "m=video 56436 RTP/SAVPF 120\r\n"
+ "b=CT:1000\r\n"
+ "a=rtpmap:120 VP8/90000\r\n");
+ ASSERT_EQ(1000U, mSdp->GetMediaSection(0).GetBandwidth("CT"))
+ << "Wrong bandwidth in media section";
+}
+
// Define a string that is 258 characters long. We use a long string here so
// that we can test that we are able to parse and handle a string longer than
// the default maximum length of 256 in sipcc.
// (ID_A is 16 chars, ID_B is 64, so LONG_IDENTITY is 4*64 + 2 = 258.)
#define ID_A "1234567890abcdef"
#define ID_B ID_A ID_A ID_A ID_A
#define LONG_IDENTITY ID_B ID_B ID_B ID_B "xx"

// Base64 payload for the a=dtls-message attribute tests; presumably a DTLS
// ClientHello (per the name) — these tests never decode it.
#define BASE64_DTLS_HELLO "FgEAAAAAAAAAAAAAagEAAF4AAAAAAAAAXgEARI11KHx3QB6Ky" \
  "CKgoBj/kwjKrApkL8kiZLwIqBaJGT8AAAA2ADkAOAA1ABYAEwAKADMAMgAvAAcAZgAFAAQAYw" \
  "BiAGEAFQASAAkAZQBkAGAAFAARAAgABgADAQA="
+
// SDP from a basic A/V apprtc call FFX/FFX
// Three m-sections (audio "first", video "second", audio "third") plus
// session-level ICE, fingerprint, identity, dtls-message, and group
// attributes. Most NewSdpTest cases assert against this offer.
const std::string kBasicAudioVideoOffer =
"v=0" CRLF
"o=Mozilla-SIPUA-35.0a1 5184 0 IN IP4 0.0.0.0" CRLF
"s=SIP Call" CRLF
"c=IN IP4 224.0.0.1/100/12" CRLF
"t=0 0" CRLF
"a=dtls-message:client " BASE64_DTLS_HELLO CRLF
"a=ice-ufrag:4a799b2e" CRLF
"a=ice-pwd:e4cc12a910f106a0a744719425510e17" CRLF
"a=ice-lite" CRLF
"a=ice-options:trickle foo" CRLF
"a=msid-semantic:WMS stream streama" CRLF
"a=msid-semantic:foo stream" CRLF
"a=fingerprint:sha-256 DF:2E:AC:8A:FD:0A:8E:99:BF:5D:E8:3C:E7:FA:FB:08:3B:3C:54:1D:D7:D4:05:77:A0:72:9B:14:08:6D:0F:4C" CRLF
"a=identity:" LONG_IDENTITY CRLF
"a=group:BUNDLE first second" CRLF
"a=group:BUNDLE third" CRLF
"a=group:LS first third" CRLF
// Audio m-section "first": sendonly, overrides session ice-ufrag/pwd.
"m=audio 9 RTP/SAVPF 109 9 0 8 101" CRLF
"c=IN IP4 0.0.0.0" CRLF
"a=mid:first" CRLF
"a=rtpmap:109 opus/48000/2" CRLF
"a=fmtp:109 maxplaybackrate=32000;stereo=1" CRLF
"a=ptime:20" CRLF
"a=maxptime:20" CRLF
"a=rtpmap:9 G722/8000" CRLF
"a=rtpmap:0 PCMU/8000" CRLF
"a=rtpmap:8 PCMA/8000" CRLF
"a=rtpmap:101 telephone-event/8000" CRLF
"a=fmtp:101 0-15,66,32-34,67" CRLF
"a=ice-ufrag:00000000" CRLF
"a=ice-pwd:0000000000000000000000000000000" CRLF
"a=sendonly" CRLF
"a=extmap:1 urn:ietf:params:rtp-hdrext:ssrc-audio-level" CRLF
"a=setup:actpass" CRLF
"a=rtcp-mux" CRLF
"a=msid:stream track" CRLF
"a=candidate:0 1 UDP 2130379007 10.0.0.36 62453 typ host" CRLF
"a=candidate:2 1 UDP 1694236671 24.6.134.204 62453 typ srflx raddr 10.0.0.36 rport 62453" CRLF
"a=candidate:3 1 UDP 100401151 162.222.183.171 49761 typ relay raddr 162.222.183.171 rport 49761" CRLF
"a=candidate:6 1 UDP 16515071 162.222.183.171 51858 typ relay raddr 162.222.183.171 rport 51858" CRLF
"a=candidate:3 2 UDP 100401150 162.222.183.171 62454 typ relay raddr 162.222.183.171 rport 62454" CRLF
"a=candidate:2 2 UDP 1694236670 24.6.134.204 55428 typ srflx raddr 10.0.0.36 rport 55428" CRLF
"a=candidate:6 2 UDP 16515070 162.222.183.171 50340 typ relay raddr 162.222.183.171 rport 50340" CRLF
"a=candidate:0 2 UDP 2130379006 10.0.0.36 55428 typ host" CRLF
"a=rtcp:62454 IN IP4 162.222.183.171" CRLF
"a=end-of-candidates" CRLF
"a=ssrc:5150" CRLF
// Video m-section "second": recvonly, own sha-1 fingerprint, rtcp-fb,
// imageattr/simulcast/rid attributes.
"m=video 9 RTP/SAVPF 120 121 122 123" CRLF
"c=IN IP6 ::1" CRLF
"a=fingerprint:sha-1 DF:FA:FB:08:3B:3C:54:1D:D7:D4:05:77:A0:72:9B:14:08:6D:0F:4C:2E:AC:8A:FD:0A:8E:99:BF:5D:E8:3C:E7" CRLF
"a=mid:second" CRLF
"a=rtpmap:120 VP8/90000" CRLF
"a=fmtp:120 max-fs=3600;max-fr=30" CRLF
"a=rtpmap:121 VP9/90000" CRLF
"a=fmtp:121 max-fs=3600;max-fr=30" CRLF
"a=rtpmap:122 red/90000" CRLF
"a=rtpmap:123 ulpfec/90000" CRLF
"a=recvonly" CRLF
"a=rtcp-fb:120 nack" CRLF
"a=rtcp-fb:120 nack pli" CRLF
"a=rtcp-fb:120 ccm fir" CRLF
"a=rtcp-fb:121 nack" CRLF
"a=rtcp-fb:121 nack pli" CRLF
"a=rtcp-fb:121 ccm fir" CRLF
"a=setup:active" CRLF
"a=rtcp-mux" CRLF
"a=msid:streama tracka" CRLF
"a=msid:streamb trackb" CRLF
"a=candidate:0 1 UDP 2130379007 10.0.0.36 59530 typ host" CRLF
"a=candidate:0 2 UDP 2130379006 10.0.0.36 64378 typ host" CRLF
"a=candidate:2 2 UDP 1694236670 24.6.134.204 64378 typ srflx raddr 10.0.0.36 rport 64378" CRLF
"a=candidate:6 2 UDP 16515070 162.222.183.171 64941 typ relay raddr 162.222.183.171 rport 64941" CRLF
"a=candidate:6 1 UDP 16515071 162.222.183.171 64800 typ relay raddr 162.222.183.171 rport 64800" CRLF
"a=candidate:2 1 UDP 1694236671 24.6.134.204 59530 typ srflx raddr 10.0.0.36 rport 59530" CRLF
"a=candidate:3 1 UDP 100401151 162.222.183.171 62935 typ relay raddr 162.222.183.171 rport 62935" CRLF
"a=candidate:3 2 UDP 100401150 162.222.183.171 61026 typ relay raddr 162.222.183.171 rport 61026" CRLF
"a=rtcp:61026" CRLF
"a=end-of-candidates" CRLF
"a=ssrc:1111 foo" CRLF
"a=ssrc:1111 foo:bar" CRLF
"a=imageattr:120 send * recv *" CRLF
"a=imageattr:121 send [x=640,y=480] recv [x=640,y=480]" CRLF
"a=simulcast:recv pt=120;121" CRLF
"a=rid:bar recv pt=96;max-width=800;max-height=600" CRLF
// Audio m-section "third": bundle-only, media-level ice-lite/ice-options.
"m=audio 9 RTP/SAVPF 0" CRLF
"a=mid:third" CRLF
"a=rtpmap:0 PCMU/8000" CRLF
"a=ice-lite" CRLF
"a=ice-options:foo bar" CRLF
"a=msid:noappdata" CRLF
"a=bundle-only" CRLF;
+
// Smoke test: the full audio/video offer parses (and round-trips) cleanly.
TEST_P(NewSdpTest, BasicAudioVideoSdpParse) {
  ParseSdp(kBasicAudioVideoOffer);
}
+
// RemoveFmtp must shrink the audio fmtp list in place and leave the video
// section's fmtps untouched.
TEST_P(NewSdpTest, CheckRemoveFmtp) {
  ParseSdp(kBasicAudioVideoOffer);
  ASSERT_TRUE(!!mSdp) << "Parse failed: " << GetParseErrors();
  ASSERT_EQ(3U, mSdp->GetMediaSectionCount())
    << "Wrong number of media sections";

  SdpAttributeList& audioAttrList = mSdp->GetMediaSection(0).GetAttributeList();

  // Audio starts with fmtps for 109 (opus) and 101 (telephone-event).
  ASSERT_TRUE(audioAttrList.HasAttribute(SdpAttribute::kFmtpAttribute));
  ASSERT_EQ(2U, audioAttrList.GetFmtp().mFmtps.size());
  ASSERT_TRUE(mSdp->GetMediaSection(0).FindFmtp("109"));
  ASSERT_TRUE(mSdp->GetMediaSection(0).FindFmtp("101"));

  mSdp->GetMediaSection(0).RemoveFmtp("101");

  ASSERT_TRUE(audioAttrList.HasAttribute(SdpAttribute::kFmtpAttribute));
  ASSERT_EQ(1U, audioAttrList.GetFmtp().mFmtps.size());
  ASSERT_TRUE(mSdp->GetMediaSection(0).FindFmtp("109"));
  ASSERT_FALSE(mSdp->GetMediaSection(0).FindFmtp("101"));

  mSdp->GetMediaSection(0).RemoveFmtp("109");

  // The fmtp attribute itself remains present even once its list is empty.
  ASSERT_TRUE(audioAttrList.HasAttribute(SdpAttribute::kFmtpAttribute));
  ASSERT_EQ(0U, audioAttrList.GetFmtp().mFmtps.size());
  ASSERT_FALSE(mSdp->GetMediaSection(0).FindFmtp("109"));
  ASSERT_FALSE(mSdp->GetMediaSection(0).FindFmtp("101"));

  // make sure we haven't disturbed the video fmtps
  SdpAttributeList& videoAttrList = mSdp->GetMediaSection(1).GetAttributeList();
  ASSERT_TRUE(videoAttrList.HasAttribute(SdpAttribute::kFmtpAttribute));
  ASSERT_EQ(2U, videoAttrList.GetFmtp().mFmtps.size());
  ASSERT_TRUE(mSdp->GetMediaSection(1).FindFmtp("120"));
  ASSERT_TRUE(mSdp->GetMediaSection(1).FindFmtp("121"));
}
+
+TEST_P(NewSdpTest, CheckIceUfrag) {
+ ParseSdp(kBasicAudioVideoOffer);
+ ASSERT_TRUE(!!mSdp) << "Parse failed: " << GetParseErrors();
+ ASSERT_TRUE(mSdp->GetAttributeList().HasAttribute(
+ SdpAttribute::kIceUfragAttribute));
+ auto ice_ufrag = mSdp->GetAttributeList().GetIceUfrag();
+ ASSERT_EQ("4a799b2e", ice_ufrag) << "Wrong ice-ufrag value";
+
+ ice_ufrag = mSdp->GetMediaSection(0)
+ .GetAttributeList().GetIceUfrag();
+ ASSERT_EQ("00000000", ice_ufrag) << "ice-ufrag isn't overridden";
+
+ ice_ufrag = mSdp->GetMediaSection(1)
+ .GetAttributeList().GetIceUfrag();
+ ASSERT_EQ("4a799b2e", ice_ufrag) << "ice-ufrag isn't carried to m-section";
+}
+
+TEST_P(NewSdpTest, CheckIcePwd) {
+ ParseSdp(kBasicAudioVideoOffer);
+ ASSERT_TRUE(!!mSdp) << "Parse failed: " << GetParseErrors();
+ ASSERT_TRUE(mSdp->GetAttributeList().HasAttribute(
+ SdpAttribute::kIcePwdAttribute));
+ auto ice_pwd = mSdp->GetAttributeList().GetIcePwd();
+ ASSERT_EQ("e4cc12a910f106a0a744719425510e17", ice_pwd) << "Wrong ice-pwd value";
+
+ ice_pwd = mSdp->GetMediaSection(0)
+ .GetAttributeList().GetIcePwd();
+ ASSERT_EQ("0000000000000000000000000000000", ice_pwd)
+ << "ice-pwd isn't overridden";
+
+ ice_pwd = mSdp->GetMediaSection(1)
+ .GetAttributeList().GetIcePwd();
+ ASSERT_EQ("e4cc12a910f106a0a744719425510e17", ice_pwd)
+ << "ice-pwd isn't carried to m-section";
+}
+
// ice-options: session-level list plus a media-level override on the
// third m-section.
TEST_P(NewSdpTest, CheckIceOptions) {
  ParseSdp(kBasicAudioVideoOffer);
  ASSERT_TRUE(!!mSdp) << "Parse failed: " << GetParseErrors();
  ASSERT_TRUE(mSdp->GetAttributeList().HasAttribute(
      SdpAttribute::kIceOptionsAttribute));
  // Session level: "a=ice-options:trickle foo"
  auto ice_options = mSdp->GetAttributeList().GetIceOptions();
  ASSERT_EQ(2U, ice_options.mValues.size()) << "Wrong ice-options size";
  ASSERT_EQ("trickle", ice_options.mValues[0]) << "Wrong ice-options value";
  ASSERT_EQ("foo", ice_options.mValues[1]) << "Wrong ice-options value";

  // Third m-section overrides with "a=ice-options:foo bar"
  ASSERT_TRUE(mSdp->GetMediaSection(2).GetAttributeList().HasAttribute(
      SdpAttribute::kIceOptionsAttribute));
  auto ice_options_media_level =
    mSdp->GetMediaSection(2).GetAttributeList().GetIceOptions();
  ASSERT_EQ(2U, ice_options_media_level.mValues.size()) << "Wrong ice-options size";
  ASSERT_EQ("foo", ice_options_media_level.mValues[0]) << "Wrong ice-options value";
  ASSERT_EQ("bar", ice_options_media_level.mValues[1]) << "Wrong ice-options value";
}
+
// Fingerprints: the session-level sha-256 value applies to m-sections that
// lack their own; the video m-section carries its own sha-1 fingerprint.
TEST_P(NewSdpTest, CheckFingerprint) {
  ParseSdp(kBasicAudioVideoOffer);
  ASSERT_TRUE(!!mSdp) << "Parse failed: " << GetParseErrors();
  ASSERT_TRUE(mSdp->GetAttributeList().HasAttribute(
      SdpAttribute::kFingerprintAttribute));
  auto fingerprints = mSdp->GetAttributeList().GetFingerprint();
  ASSERT_EQ(1U, fingerprints.mFingerprints.size());
  ASSERT_EQ(SdpFingerprintAttributeList::kSha256,
            fingerprints.mFingerprints[0].hashFunc)
      << "Wrong hash function";
  ASSERT_EQ("DF:2E:AC:8A:FD:0A:8E:99:BF:5D:E8:3C:E7:FA:FB:08:"
            "3B:3C:54:1D:D7:D4:05:77:A0:72:9B:14:08:6D:0F:4C",
            SdpFingerprintAttributeList::FormatFingerprint(
                fingerprints.mFingerprints[0].fingerprint))
      << "Wrong fingerprint";
  // Sanity check that the hex was decoded to raw bytes, not kept as text.
  ASSERT_EQ(0xdfU, fingerprints.mFingerprints[0].fingerprint[0])
      << "first fingerprint element is iffy";

  ASSERT_EQ(3U, mSdp->GetMediaSectionCount());

  // Fallback to session level
  ASSERT_TRUE(mSdp->GetMediaSection(0).GetAttributeList().HasAttribute(
      SdpAttribute::kFingerprintAttribute));
  fingerprints = mSdp->GetMediaSection(0).GetAttributeList().GetFingerprint();
  ASSERT_EQ(1U, fingerprints.mFingerprints.size());
  ASSERT_EQ(SdpFingerprintAttributeList::kSha256,
            fingerprints.mFingerprints[0].hashFunc)
      << "Wrong hash function";
  ASSERT_EQ("DF:2E:AC:8A:FD:0A:8E:99:BF:5D:E8:3C:E7:FA:FB:08:"
            "3B:3C:54:1D:D7:D4:05:77:A0:72:9B:14:08:6D:0F:4C",
            SdpFingerprintAttributeList::FormatFingerprint(
                fingerprints.mFingerprints[0].fingerprint))
      << "Wrong fingerprint";
  ASSERT_EQ(0xdfU, fingerprints.mFingerprints[0].fingerprint[0])
      << "first fingerprint element is iffy";

  // Media level
  ASSERT_TRUE(mSdp->GetMediaSection(1).GetAttributeList().HasAttribute(
      SdpAttribute::kFingerprintAttribute));
  fingerprints = mSdp->GetMediaSection(1).GetAttributeList().GetFingerprint();
  ASSERT_EQ(1U, fingerprints.mFingerprints.size());
  ASSERT_EQ(SdpFingerprintAttributeList::kSha1,
            fingerprints.mFingerprints[0].hashFunc)
      << "Wrong hash function";
  ASSERT_EQ("DF:FA:FB:08:3B:3C:54:1D:D7:D4:05:77:A0:72:9B:14:"
            "08:6D:0F:4C:2E:AC:8A:FD:0A:8E:99:BF:5D:E8:3C:E7",
            SdpFingerprintAttributeList::FormatFingerprint(
                fingerprints.mFingerprints[0].fingerprint))
      << "Wrong fingerprint";
  ASSERT_EQ(0xdfU, fingerprints.mFingerprints[0].fingerprint[0])
      << "first fingerprint element is iffy";
}
+
+TEST_P(NewSdpTest, CheckIdentity) {
+ ParseSdp(kBasicAudioVideoOffer);
+ ASSERT_TRUE(!!mSdp) << "Parse failed: " << GetParseErrors();
+ ASSERT_TRUE(mSdp->GetAttributeList().HasAttribute(
+ SdpAttribute::kIdentityAttribute));
+ auto identity = mSdp->GetAttributeList().GetIdentity();
+ ASSERT_EQ(LONG_IDENTITY, identity) << "Wrong identity assertion";
+}
+
+TEST_P(NewSdpTest, CheckDtlsMessage) {
+ ParseSdp(kBasicAudioVideoOffer);
+ ASSERT_TRUE(!!mSdp) << "Parse failed: " << GetParseErrors();
+ ASSERT_TRUE(mSdp->GetAttributeList().HasAttribute(
+ SdpAttribute::kDtlsMessageAttribute));
+ auto dtls_message = mSdp->GetAttributeList().GetDtlsMessage();
+ ASSERT_EQ(SdpDtlsMessageAttribute::kClient, dtls_message.mRole)
+ << "Wrong dtls-message role";
+ ASSERT_EQ(BASE64_DTLS_HELLO, dtls_message.mValue)
+ << "Wrong dtls-message value";
+}
+
+TEST_P(NewSdpTest, CheckNumberOfMediaSections) {
+ ParseSdp(kBasicAudioVideoOffer);
+ ASSERT_TRUE(!!mSdp) << "Parse failed: " << GetParseErrors();
+ ASSERT_EQ(3U, mSdp->GetMediaSectionCount()) << "Wrong number of media sections";
+}
+
+TEST_P(NewSdpTest, CheckMlines) {
+ ParseSdp(kBasicAudioVideoOffer);
+ ASSERT_TRUE(!!mSdp) << "Parse failed: " << GetParseErrors();
+ ASSERT_EQ(3U, mSdp->GetMediaSectionCount()) << "Wrong number of media sections";
+ ASSERT_EQ(SdpMediaSection::kAudio, mSdp->GetMediaSection(0).GetMediaType())
+ << "Wrong type for first media section";
+ ASSERT_EQ(SdpMediaSection::kRtpSavpf,
+ mSdp->GetMediaSection(0).GetProtocol())
+ << "Wrong protocol for audio";
+ auto audio_formats = mSdp->GetMediaSection(0).GetFormats();
+ ASSERT_EQ(5U, audio_formats.size()) << "Wrong number of formats for audio";
+ ASSERT_EQ("109", audio_formats[0]);
+ ASSERT_EQ("9", audio_formats[1]);
+ ASSERT_EQ("0", audio_formats[2]);
+ ASSERT_EQ("8", audio_formats[3]);
+ ASSERT_EQ("101", audio_formats[4]);
+
+ ASSERT_EQ(SdpMediaSection::kVideo, mSdp->GetMediaSection(1).GetMediaType())
+ << "Wrong type for second media section";
+ ASSERT_EQ(SdpMediaSection::kRtpSavpf,
+ mSdp->GetMediaSection(1).GetProtocol())
+ << "Wrong protocol for video";
+ auto video_formats = mSdp->GetMediaSection(1).GetFormats();
+ ASSERT_EQ(4U, video_formats.size()) << "Wrong number of formats for video";
+ ASSERT_EQ("120", video_formats[0]);
+ ASSERT_EQ("121", video_formats[1]);
+ ASSERT_EQ("122", video_formats[2]);
+ ASSERT_EQ("123", video_formats[3]);
+
+ ASSERT_EQ(SdpMediaSection::kAudio, mSdp->GetMediaSection(2).GetMediaType())
+ << "Wrong type for third media section";
+}
+
// a=setup: audio is actpass, video is active, third m-section has none.
TEST_P(NewSdpTest, CheckSetup) {
  ParseSdp(kBasicAudioVideoOffer);
  ASSERT_TRUE(!!mSdp) << "Parse failed: " << GetParseErrors();
  ASSERT_EQ(3U, mSdp->GetMediaSectionCount()) << "Wrong number of media sections";

  ASSERT_TRUE(mSdp->GetMediaSection(0).GetAttributeList().HasAttribute(
      SdpAttribute::kSetupAttribute));
  ASSERT_EQ(SdpSetupAttribute::kActpass,
            mSdp->GetMediaSection(0).GetAttributeList().GetSetup().mRole);
  ASSERT_TRUE(mSdp->GetMediaSection(1).GetAttributeList().HasAttribute(
      SdpAttribute::kSetupAttribute));
  ASSERT_EQ(SdpSetupAttribute::kActive,
            mSdp->GetMediaSection(1).GetAttributeList().GetSetup().mRole);
  // The third m-section has no a=setup line at all.
  ASSERT_FALSE(mSdp->GetMediaSection(2).GetAttributeList().HasAttribute(
      SdpAttribute::kSetupAttribute));
}
+
// a=ssrc: the audio section has one bare ssrc (5150, no attribute text);
// the video section has two attributed entries for ssrc 1111.
TEST_P(NewSdpTest, CheckSsrc)
{
  ParseSdp(kBasicAudioVideoOffer);
  ASSERT_TRUE(!!mSdp) << "Parse failed: " << GetParseErrors();
  ASSERT_EQ(3U, mSdp->GetMediaSectionCount()) << "Wrong number of media sections";

  ASSERT_TRUE(mSdp->GetMediaSection(0).GetAttributeList().HasAttribute(
      SdpAttribute::kSsrcAttribute));
  auto ssrcs = mSdp->GetMediaSection(0).GetAttributeList().GetSsrc().mSsrcs;
  ASSERT_EQ(1U, ssrcs.size());
  ASSERT_EQ(5150U, ssrcs[0].ssrc);
  ASSERT_EQ("", ssrcs[0].attribute);

  ASSERT_TRUE(mSdp->GetMediaSection(1).GetAttributeList().HasAttribute(
      SdpAttribute::kSsrcAttribute));
  ssrcs = mSdp->GetMediaSection(1).GetAttributeList().GetSsrc().mSsrcs;
  ASSERT_EQ(2U, ssrcs.size());
  ASSERT_EQ(1111U, ssrcs[0].ssrc);
  ASSERT_EQ("foo", ssrcs[0].attribute);
  ASSERT_EQ(1111U, ssrcs[1].ssrc);
  ASSERT_EQ("foo:bar", ssrcs[1].attribute);
}
+
+TEST_P(NewSdpTest, CheckRtpmap) {
+ ParseSdp(kBasicAudioVideoOffer);
+ ASSERT_TRUE(!!mSdp) << "Parse failed: " << GetParseErrors();
+ ASSERT_EQ(3U, mSdp->GetMediaSectionCount())
+ << "Wrong number of media sections";
+
+ const SdpMediaSection& audiosec = mSdp->GetMediaSection(0);
+ const SdpRtpmapAttributeList& rtpmap = audiosec.GetAttributeList().GetRtpmap();
+ ASSERT_EQ(5U, rtpmap.mRtpmaps.size())
+ << "Wrong number of rtpmap attributes for audio";
+
+ // Need to know name of type
+ CheckRtpmap("109",
+ SdpRtpmapAttributeList::kOpus,
+ "opus",
+ 48000,
+ 2,
+ audiosec.GetFormats()[0],
+ rtpmap);
+
+ CheckRtpmap("9",
+ SdpRtpmapAttributeList::kG722,
+ "G722",
+ 8000,
+ 1,
+ audiosec.GetFormats()[1],
+ rtpmap);
+
+ CheckRtpmap("0",
+ SdpRtpmapAttributeList::kPCMU,
+ "PCMU",
+ 8000,
+ 1,
+ audiosec.GetFormats()[2],
+ rtpmap);
+
+ CheckRtpmap("8",
+ SdpRtpmapAttributeList::kPCMA,
+ "PCMA",
+ 8000,
+ 1,
+ audiosec.GetFormats()[3],
+ rtpmap);
+
+ CheckRtpmap("101",
+ SdpRtpmapAttributeList::kTelephoneEvent,
+ "telephone-event",
+ 8000,
+ 1,
+ audiosec.GetFormats()[4],
+ rtpmap);
+
+ const SdpMediaSection& videosec = mSdp->GetMediaSection(1);
+ const SdpRtpmapAttributeList videoRtpmap =
+ videosec.GetAttributeList().GetRtpmap();
+ ASSERT_EQ(4U, videoRtpmap.mRtpmaps.size())
+ << "Wrong number of rtpmap attributes for video";
+
+ CheckRtpmap("120",
+ SdpRtpmapAttributeList::kVP8,
+ "VP8",
+ 90000,
+ 0,
+ videosec.GetFormats()[0],
+ videoRtpmap);
+
+ CheckRtpmap("121",
+ SdpRtpmapAttributeList::kVP9,
+ "VP9",
+ 90000,
+ 0,
+ videosec.GetFormats()[1],
+ videoRtpmap);
+
+ CheckRtpmap("122",
+ SdpRtpmapAttributeList::kRed,
+ "red",
+ 90000,
+ 0,
+ videosec.GetFormats()[2],
+ videoRtpmap);
+
+ CheckRtpmap("123",
+ SdpRtpmapAttributeList::kUlpfec,
+ "ulpfec",
+ 90000,
+ 0,
+ videosec.GetFormats()[3],
+ videoRtpmap);
+}
+
// Minimal audio offer whose last format (101) is telephone-event; the
// CheckTelephoneEvent* tests append a=fmtp:101 lines to this base.
static const std::string kAudioWithTelephoneEvent =
  "v=0" CRLF
  "o=- 4294967296 2 IN IP4 127.0.0.1" CRLF
  "s=SIP Call" CRLF
  "c=IN IP4 198.51.100.7" CRLF
  "t=0 0" CRLF
  "m=audio 9 RTP/SAVPF 109 9 0 8 101" CRLF
  "c=IN IP4 0.0.0.0" CRLF
  "a=mid:first" CRLF
  "a=rtpmap:109 opus/48000/2" CRLF
  "a=fmtp:109 maxplaybackrate=32000;stereo=1" CRLF
  "a=ptime:20" CRLF
  "a=maxptime:20" CRLF
  "a=rtpmap:9 G722/8000" CRLF
  "a=rtpmap:0 PCMU/8000" CRLF
  "a=rtpmap:8 PCMA/8000" CRLF
  "a=rtpmap:101 telephone-event/8000" CRLF;
+
+TEST_P(NewSdpTest, CheckTelephoneEventNoFmtp) {
+ ParseSdp(kAudioWithTelephoneEvent);
+ ASSERT_TRUE(!!mSdp) << "Parse failed: " << GetParseErrors();
+ ASSERT_EQ(1U, mSdp->GetMediaSectionCount())
+ << "Wrong number of media sections";
+
+ ASSERT_TRUE(mSdp->GetMediaSection(0).GetAttributeList().HasAttribute(
+ SdpAttribute::kFmtpAttribute));
+ auto audio_format_params =
+ mSdp->GetMediaSection(0).GetAttributeList().GetFmtp().mFmtps;
+ ASSERT_EQ(1U, audio_format_params.size());
+
+ // make sure we don't get a fmtp for codec 101
+ for (size_t i = 0; i < audio_format_params.size(); ++i) {
+ ASSERT_NE("101", audio_format_params[i].format);
+ }
+}
+
// fmtp:101 (telephone-event) tone-list handling: well-formed lists are kept
// verbatim; most malformed lists fall back to the default "0-15"; the two
// trailing-hyphen forms fail the whole parse (expectSuccess=false).
TEST_P(NewSdpTest, CheckTelephoneEventWithDefaultEvents) {
  ParseSdp(kAudioWithTelephoneEvent
           + "a=fmtp:101 0-15" CRLF);
  ASSERT_TRUE(!!mSdp) << "Parse failed: " << GetParseErrors();
  ASSERT_EQ(1U, mSdp->GetMediaSectionCount())
    << "Wrong number of media sections";

  CheckDtmfFmtp("0-15");
}

// A non-digit character after a range drops the bad list in favor of the
// default tones.
TEST_P(NewSdpTest, CheckTelephoneEventWithBadCharacter) {
  ParseSdp(kAudioWithTelephoneEvent
           + "a=fmtp:101 0-5." CRLF);
  ASSERT_TRUE(!!mSdp) << "Parse failed: " << GetParseErrors();
  ASSERT_EQ(1U, mSdp->GetMediaSectionCount())
    << "Wrong number of media sections";

  CheckDtmfFmtp("0-15");
}

TEST_P(NewSdpTest, CheckTelephoneEventIncludingCommas) {
  ParseSdp(kAudioWithTelephoneEvent
           + "a=fmtp:101 0-15,66,67" CRLF);
  ASSERT_TRUE(!!mSdp) << "Parse failed: " << GetParseErrors();
  ASSERT_EQ(1U, mSdp->GetMediaSectionCount())
    << "Wrong number of media sections";

  CheckDtmfFmtp("0-15,66,67");
}

TEST_P(NewSdpTest, CheckTelephoneEventComplexEvents) {
  ParseSdp(kAudioWithTelephoneEvent
           + "a=fmtp:101 0,1,2-4,5-15,66,67" CRLF);
  ASSERT_TRUE(!!mSdp) << "Parse failed: " << GetParseErrors();
  ASSERT_EQ(1U, mSdp->GetMediaSectionCount())
    << "Wrong number of media sections";

  CheckDtmfFmtp("0,1,2-4,5-15,66,67");
}

TEST_P(NewSdpTest, CheckTelephoneEventNoHyphen) {
  ParseSdp(kAudioWithTelephoneEvent
           + "a=fmtp:101 5,6,7" CRLF);
  ASSERT_TRUE(!!mSdp) << "Parse failed: " << GetParseErrors();
  ASSERT_EQ(1U, mSdp->GetMediaSectionCount())
    << "Wrong number of media sections";

  CheckDtmfFmtp("5,6,7");
}

TEST_P(NewSdpTest, CheckTelephoneEventOnlyZero) {
  ParseSdp(kAudioWithTelephoneEvent
           + "a=fmtp:101 0" CRLF);
  ASSERT_TRUE(!!mSdp) << "Parse failed: " << GetParseErrors();
  ASSERT_EQ(1U, mSdp->GetMediaSectionCount())
    << "Wrong number of media sections";

  CheckDtmfFmtp("0");
}

TEST_P(NewSdpTest, CheckTelephoneEventOnlyOne) {
  ParseSdp(kAudioWithTelephoneEvent
           + "a=fmtp:101 1" CRLF);
  ASSERT_TRUE(!!mSdp) << "Parse failed: " << GetParseErrors();
  ASSERT_EQ(1U, mSdp->GetMediaSectionCount())
    << "Wrong number of media sections";

  CheckDtmfFmtp("1");
}

// Tones are limited to at most two digits; longer runs are rejected.
TEST_P(NewSdpTest, CheckTelephoneEventBadThreeDigit) {
  ParseSdp(kAudioWithTelephoneEvent
           + "a=fmtp:101 123" CRLF);
  ASSERT_TRUE(!!mSdp) << "Parse failed: " << GetParseErrors();
  ASSERT_EQ(1U, mSdp->GetMediaSectionCount())
    << "Wrong number of media sections";

  // check for the default dtmf tones
  CheckDtmfFmtp("0-15");
}

TEST_P(NewSdpTest, CheckTelephoneEventBadThreeDigitWithHyphen) {
  ParseSdp(kAudioWithTelephoneEvent
           + "a=fmtp:101 0-123" CRLF);
  ASSERT_TRUE(!!mSdp) << "Parse failed: " << GetParseErrors();
  ASSERT_EQ(1U, mSdp->GetMediaSectionCount())
    << "Wrong number of media sections";

  // check for the default dtmf tones
  CheckDtmfFmtp("0-15");
}

TEST_P(NewSdpTest, CheckTelephoneEventBadLeadingHyphen) {
  ParseSdp(kAudioWithTelephoneEvent
           + "a=fmtp:101 -12" CRLF);
  ASSERT_TRUE(!!mSdp) << "Parse failed: " << GetParseErrors();
  ASSERT_EQ(1U, mSdp->GetMediaSectionCount())
    << "Wrong number of media sections";

  // check for the default dtmf tones
  CheckDtmfFmtp("0-15");
}

// A trailing hyphen is severe enough to fail the whole parse.
TEST_P(NewSdpTest, CheckTelephoneEventBadTrailingHyphen) {
  ParseSdp(kAudioWithTelephoneEvent
           + "a=fmtp:101 12-" CRLF, false);
}

TEST_P(NewSdpTest, CheckTelephoneEventBadTrailingHyphenInMiddle) {
  ParseSdp(kAudioWithTelephoneEvent
           + "a=fmtp:101 1,12-,4" CRLF, false);
}

TEST_P(NewSdpTest, CheckTelephoneEventBadLeadingComma) {
  ParseSdp(kAudioWithTelephoneEvent
           + "a=fmtp:101 ,2,3" CRLF);
  ASSERT_TRUE(!!mSdp) << "Parse failed: " << GetParseErrors();
  ASSERT_EQ(1U, mSdp->GetMediaSectionCount())
    << "Wrong number of media sections";

  // check for the default dtmf tones
  CheckDtmfFmtp("0-15");
}

TEST_P(NewSdpTest, CheckTelephoneEventBadMultipleLeadingComma) {
  ParseSdp(kAudioWithTelephoneEvent
           + "a=fmtp:101 ,,,2,3" CRLF);
  ASSERT_TRUE(!!mSdp) << "Parse failed: " << GetParseErrors();
  ASSERT_EQ(1U, mSdp->GetMediaSectionCount())
    << "Wrong number of media sections";

  // check for the default dtmf tones
  CheckDtmfFmtp("0-15");
}

TEST_P(NewSdpTest, CheckTelephoneEventBadConsecutiveCommas) {
  ParseSdp(kAudioWithTelephoneEvent
           + "a=fmtp:101 1,,,,,,,,3" CRLF);
  ASSERT_TRUE(!!mSdp) << "Parse failed: " << GetParseErrors();
  ASSERT_EQ(1U, mSdp->GetMediaSectionCount())
    << "Wrong number of media sections";

  // check for the default dtmf tones
  CheckDtmfFmtp("0-15");
}

TEST_P(NewSdpTest, CheckTelephoneEventBadTrailingComma) {
  ParseSdp(kAudioWithTelephoneEvent
           + "a=fmtp:101 1,2,3," CRLF);
  ASSERT_TRUE(!!mSdp) << "Parse failed: " << GetParseErrors();
  ASSERT_EQ(1U, mSdp->GetMediaSectionCount())
    << "Wrong number of media sections";

  // check for the default dtmf tones
  CheckDtmfFmtp("0-15");
}

TEST_P(NewSdpTest, CheckTelephoneEventBadTwoHyphens) {
  ParseSdp(kAudioWithTelephoneEvent
           + "a=fmtp:101 1-2-3" CRLF);
  ASSERT_TRUE(!!mSdp) << "Parse failed: " << GetParseErrors();
  ASSERT_EQ(1U, mSdp->GetMediaSectionCount())
    << "Wrong number of media sections";

  // check for the default dtmf tones
  CheckDtmfFmtp("0-15");
}

TEST_P(NewSdpTest, CheckTelephoneEventBadSixDigit) {
  ParseSdp(kAudioWithTelephoneEvent
           + "a=fmtp:101 112233" CRLF);
  ASSERT_TRUE(!!mSdp) << "Parse failed: " << GetParseErrors();
  ASSERT_EQ(1U, mSdp->GetMediaSectionCount())
    << "Wrong number of media sections";

  // check for the default dtmf tones
  CheckDtmfFmtp("0-15");
}

// A reversed range (high-low) is also replaced by the defaults.
TEST_P(NewSdpTest, CheckTelephoneEventBadRangeReversed) {
  ParseSdp(kAudioWithTelephoneEvent
           + "a=fmtp:101 33-2" CRLF);
  ASSERT_TRUE(!!mSdp) << "Parse failed: " << GetParseErrors();
  ASSERT_EQ(1U, mSdp->GetMediaSectionCount())
    << "Wrong number of media sections";

  // check for the default dtmf tones
  CheckDtmfFmtp("0-15");
}
+
+// Single video m-section offering H264 (97), VP8 (120), VP9 (121) plus the
+// redundancy payloads RED (122) and ulpfec (123). Deliberately carries no
+// a=fmtp for 122/123; the CheckRed* tests below append one as needed.
+static const std::string kVideoWithRedAndUlpfecSdp =
+ "v=0" CRLF
+ "o=- 4294967296 2 IN IP4 127.0.0.1" CRLF
+ "s=SIP Call" CRLF
+ "c=IN IP4 198.51.100.7" CRLF
+ "t=0 0" CRLF
+ "m=video 9 RTP/SAVPF 97 120 121 122 123" CRLF
+ "c=IN IP6 ::1" CRLF
+ "a=fingerprint:sha-1 DF:FA:FB:08:3B:3C:54:1D:D7:D4:05:77:A0:72:9B:14:08:6D:0F:4C:2E:AC:8A:FD:0A:8E:99:BF:5D:E8:3C:E7" CRLF
+ "a=rtpmap:97 H264/90000" CRLF
+ "a=fmtp:97 profile-level-id=42a01e" CRLF
+ "a=rtpmap:120 VP8/90000" CRLF
+ "a=fmtp:120 max-fs=3600;max-fr=30" CRLF
+ "a=rtpmap:121 VP9/90000" CRLF
+ "a=fmtp:121 max-fs=3600;max-fr=30" CRLF
+ "a=rtpmap:122 red/90000" CRLF
+ "a=rtpmap:123 ulpfec/90000" CRLF;
+
+// When the offer has no a=fmtp:122 line, the parser must not synthesize an
+// fmtp entry for RED: only the three explicit fmtps (97, 120, 121) appear.
+TEST_P(NewSdpTest, CheckRedNoFmtp) {
+ ParseSdp(kVideoWithRedAndUlpfecSdp);
+ ASSERT_TRUE(!!mSdp) << "Parse failed: " << GetParseErrors();
+ ASSERT_EQ(1U, mSdp->GetMediaSectionCount())
+ << "Wrong number of media sections";
+
+ ASSERT_TRUE(mSdp->GetMediaSection(0).GetAttributeList().HasAttribute(
+ SdpAttribute::kFmtpAttribute));
+ auto video_format_params =
+ mSdp->GetMediaSection(0).GetAttributeList().GetFmtp().mFmtps;
+ ASSERT_EQ(3U, video_format_params.size());
+
+ // make sure we don't get a fmtp for codec 122
+ for (size_t i = 0; i < video_format_params.size(); ++i) {
+ ASSERT_NE("122", video_format_params[i].format);
+ }
+}
+
+// "a=fmtp:122 120/121" (slash-separated primary/secondary payloads per
+// RFC 2198 usage) must parse into RedParameters with two encodings in order.
+TEST_P(NewSdpTest, CheckRedFmtpWith2Codecs) {
+ ParseSdp(kVideoWithRedAndUlpfecSdp + "a=fmtp:122 120/121" CRLF);
+ ASSERT_TRUE(!!mSdp) << "Parse failed: " << GetParseErrors();
+ ASSERT_EQ(1U, mSdp->GetMediaSectionCount())
+ << "Wrong number of media sections";
+
+ ASSERT_TRUE(mSdp->GetMediaSection(0).GetAttributeList().HasAttribute(
+ SdpAttribute::kFmtpAttribute));
+ auto video_format_params =
+ mSdp->GetMediaSection(0).GetAttributeList().GetFmtp().mFmtps;
+ ASSERT_EQ(4U, video_format_params.size());
+
+ // the appended fmtp lands last; its parameters must downcast to
+ // RedParameters (codec_type kRed)
+ ASSERT_EQ("122", video_format_params[3].format);
+ ASSERT_TRUE(!!video_format_params[3].parameters);
+ ASSERT_EQ(SdpRtpmapAttributeList::kRed,
+ video_format_params[3].parameters->codec_type);
+ const SdpFmtpAttributeList::RedParameters* red_parameters(
+ static_cast<SdpFmtpAttributeList::RedParameters*>(
+ video_format_params[3].parameters.get()));
+ ASSERT_EQ(2U, red_parameters->encodings.size());
+ ASSERT_EQ(120U, red_parameters->encodings[0]);
+ ASSERT_EQ(121U, red_parameters->encodings[1]);
+}
+
+// Same as CheckRedFmtpWith2Codecs but with three redundancy levels
+// ("120/121/123"); all three payload types must round out in order.
+TEST_P(NewSdpTest, CheckRedFmtpWith3Codecs) {
+ ParseSdp(kVideoWithRedAndUlpfecSdp + "a=fmtp:122 120/121/123" CRLF);
+ ASSERT_TRUE(!!mSdp) << "Parse failed: " << GetParseErrors();
+ ASSERT_EQ(1U, mSdp->GetMediaSectionCount())
+ << "Wrong number of media sections";
+
+ ASSERT_TRUE(mSdp->GetMediaSection(0).GetAttributeList().HasAttribute(
+ SdpAttribute::kFmtpAttribute));
+ auto video_format_params =
+ mSdp->GetMediaSection(0).GetAttributeList().GetFmtp().mFmtps;
+ ASSERT_EQ(4U, video_format_params.size());
+
+ ASSERT_EQ("122", video_format_params[3].format);
+ ASSERT_TRUE(!!video_format_params[3].parameters);
+ ASSERT_EQ(SdpRtpmapAttributeList::kRed,
+ video_format_params[3].parameters->codec_type);
+ const SdpFmtpAttributeList::RedParameters* red_parameters(
+ static_cast<SdpFmtpAttributeList::RedParameters*>(
+ video_format_params[3].parameters.get()));
+ ASSERT_EQ(3U, red_parameters->encodings.size());
+ ASSERT_EQ(120U, red_parameters->encodings[0]);
+ ASSERT_EQ(121U, red_parameters->encodings[1]);
+ ASSERT_EQ(123U, red_parameters->encodings[2]);
+}
+
+// Three-section offer (audio / video / audio) used by CheckFormatParameters:
+// - section 0: opus + telephone-event fmtps, media-level ice-ufrag/pwd
+//   overriding the session-level ones, full candidate set
+// - section 1: two H264 payloads (97 minimal fmtp, 98 fully parameterized)
+//   plus VP8, and two a=msid lines
+// - section 2: PCMU only, no fmtp, msid with no appdata
+// Session level carries BUNDLE/LS groups and a multicast c= with TTL/count.
+const std::string kH264AudioVideoOffer =
+"v=0" CRLF
+"o=Mozilla-SIPUA-35.0a1 5184 0 IN IP4 0.0.0.0" CRLF
+"s=SIP Call" CRLF
+"c=IN IP4 224.0.0.1/100/12" CRLF
+"t=0 0" CRLF
+"a=ice-ufrag:4a799b2e" CRLF
+"a=ice-pwd:e4cc12a910f106a0a744719425510e17" CRLF
+"a=ice-lite" CRLF
+"a=msid-semantic:WMS stream streama" CRLF
+"a=fingerprint:sha-256 DF:2E:AC:8A:FD:0A:8E:99:BF:5D:E8:3C:E7:FA:FB:08:3B:3C:54:1D:D7:D4:05:77:A0:72:9B:14:08:6D:0F:4C" CRLF
+"a=group:BUNDLE first second" CRLF
+"a=group:BUNDLE third" CRLF
+"a=group:LS first third" CRLF
+"m=audio 9 RTP/SAVPF 109 9 0 8 101" CRLF
+"c=IN IP4 0.0.0.0" CRLF
+"a=mid:first" CRLF
+"a=rtpmap:109 opus/48000/2" CRLF
+"a=ptime:20" CRLF
+"a=maxptime:20" CRLF
+"a=rtpmap:9 G722/8000" CRLF
+"a=rtpmap:0 PCMU/8000" CRLF
+"a=rtpmap:8 PCMA/8000" CRLF
+"a=rtpmap:101 telephone-event/8000" CRLF
+"a=fmtp:109 maxplaybackrate=32000;stereo=1;useinbandfec=1" CRLF
+"a=fmtp:101 0-15,66,32-34,67" CRLF
+"a=ice-ufrag:00000000" CRLF
+"a=ice-pwd:0000000000000000000000000000000" CRLF
+"a=sendonly" CRLF
+"a=extmap:1 urn:ietf:params:rtp-hdrext:ssrc-audio-level" CRLF
+"a=setup:actpass" CRLF
+"a=rtcp-mux" CRLF
+"a=msid:stream track" CRLF
+"a=candidate:0 1 UDP 2130379007 10.0.0.36 62453 typ host" CRLF
+"a=candidate:2 1 UDP 1694236671 24.6.134.204 62453 typ srflx raddr 10.0.0.36 rport 62453" CRLF
+"a=candidate:3 1 UDP 100401151 162.222.183.171 49761 typ relay raddr 162.222.183.171 rport 49761" CRLF
+"a=candidate:6 1 UDP 16515071 162.222.183.171 51858 typ relay raddr 162.222.183.171 rport 51858" CRLF
+"a=candidate:3 2 UDP 100401150 162.222.183.171 62454 typ relay raddr 162.222.183.171 rport 62454" CRLF
+"a=candidate:2 2 UDP 1694236670 24.6.134.204 55428 typ srflx raddr 10.0.0.36 rport 55428" CRLF
+"a=candidate:6 2 UDP 16515070 162.222.183.171 50340 typ relay raddr 162.222.183.171 rport 50340" CRLF
+"a=candidate:0 2 UDP 2130379006 10.0.0.36 55428 typ host" CRLF
+"m=video 9 RTP/SAVPF 97 98 120" CRLF
+"c=IN IP6 ::1" CRLF
+"a=mid:second" CRLF
+"a=rtpmap:97 H264/90000" CRLF
+"a=fmtp:97 profile-level-id=42a01e" CRLF
+"a=rtpmap:98 H264/90000" CRLF
+"a=fmtp:98 PROFILE=0;LEVEL=0;profile-level-id=42a00d;packetization-mode=1;level-asymmetry-allowed=1;max-mbps=42000;max-fs=1400;max-cpb=1000;max-dpb=1000;max-br=180000;parameter-add=1;usedtx=0;stereo=0;useinbandfec=0;cbr=0" CRLF
+"a=rtpmap:120 VP8/90000" CRLF
+"a=fmtp:120 max-fs=3601;max-fr=31" CRLF
+"a=recvonly" CRLF
+"a=setup:active" CRLF
+"a=rtcp-mux" CRLF
+"a=msid:streama tracka" CRLF
+"a=msid:streamb trackb" CRLF
+"a=candidate:0 1 UDP 2130379007 10.0.0.36 59530 typ host" CRLF
+"a=candidate:0 2 UDP 2130379006 10.0.0.36 64378 typ host" CRLF
+"a=candidate:2 2 UDP 1694236670 24.6.134.204 64378 typ srflx raddr 10.0.0.36 rport 64378" CRLF
+"a=candidate:6 2 UDP 16515070 162.222.183.171 64941 typ relay raddr 162.222.183.171 rport 64941" CRLF
+"a=candidate:6 1 UDP 16515071 162.222.183.171 64800 typ relay raddr 162.222.183.171 rport 64800" CRLF
+"a=candidate:2 1 UDP 1694236671 24.6.134.204 59530 typ srflx raddr 10.0.0.36 rport 59530" CRLF
+"a=candidate:3 1 UDP 100401151 162.222.183.171 62935 typ relay raddr 162.222.183.171 rport 62935" CRLF
+"a=candidate:3 2 UDP 100401150 162.222.183.171 61026 typ relay raddr 162.222.183.171 rport 61026" CRLF
+"m=audio 9 RTP/SAVPF 0" CRLF
+"a=mid:third" CRLF
+"a=rtpmap:0 PCMU/8000" CRLF
+"a=ice-lite" CRLF
+"a=msid:noappdata" CRLF;
+
+// End-to-end check of a=fmtp parsing over kH264AudioVideoOffer: opus and
+// telephone-event parameters in the audio section, two differently
+// parameterized H264 payloads plus VP8 in the video section, and no fmtp at
+// all on the PCMU-only section.
+TEST_P(NewSdpTest, CheckFormatParameters) {
+ ParseSdp(kH264AudioVideoOffer);
+ ASSERT_TRUE(!!mSdp) << "Parse failed: " << GetParseErrors();
+ ASSERT_EQ(3U, mSdp->GetMediaSectionCount())
+ << "Wrong number of media sections";
+
+ // audio section: fmtps for opus (109) and telephone-event (101) only
+ ASSERT_TRUE(mSdp->GetMediaSection(0).GetAttributeList().HasAttribute(
+ SdpAttribute::kFmtpAttribute));
+ auto audio_format_params =
+ mSdp->GetMediaSection(0).GetAttributeList().GetFmtp().mFmtps;
+ ASSERT_EQ(2U, audio_format_params.size());
+ ASSERT_EQ("109", audio_format_params[0].format);
+ ASSERT_TRUE(!!audio_format_params[0].parameters);
+ const SdpFmtpAttributeList::OpusParameters* opus_parameters =
+ static_cast<SdpFmtpAttributeList::OpusParameters*>(
+ audio_format_params[0].parameters.get());
+ ASSERT_EQ(32000U, opus_parameters->maxplaybackrate);
+ ASSERT_EQ(1U, opus_parameters->stereo);
+ ASSERT_EQ(1U, opus_parameters->useInBandFec);
+ ASSERT_EQ("101", audio_format_params[1].format);
+ ASSERT_TRUE(!!audio_format_params[1].parameters);
+ const SdpFmtpAttributeList::TelephoneEventParameters* te_parameters =
+ static_cast<SdpFmtpAttributeList::TelephoneEventParameters*>(
+ audio_format_params[1].parameters.get());
+ ASSERT_NE(0U, te_parameters->dtmfTones.size());
+ // the tone list (values and ranges) is preserved verbatim
+ ASSERT_EQ("0-15,66,32-34,67", te_parameters->dtmfTones);
+
+ // video section: H264 97 (sparse fmtp — unset fields default to 0/false),
+ // H264 98 (fully parameterized), and VP8 120
+ ASSERT_TRUE(mSdp->GetMediaSection(1).GetAttributeList().HasAttribute(
+ SdpAttribute::kFmtpAttribute));
+ auto video_format_params =
+ mSdp->GetMediaSection(1).GetAttributeList().GetFmtp().mFmtps;
+ ASSERT_EQ(3U, video_format_params.size());
+ ASSERT_EQ("97", video_format_params[0].format);
+ ASSERT_TRUE(!!video_format_params[0].parameters);
+ ASSERT_EQ(SdpRtpmapAttributeList::kH264,
+ video_format_params[0].parameters->codec_type);
+ const SdpFmtpAttributeList::H264Parameters *h264_parameters(
+ static_cast<SdpFmtpAttributeList::H264Parameters*>(
+ video_format_params[0].parameters.get()));
+ ASSERT_EQ((uint32_t)0x42a01e, h264_parameters->profile_level_id);
+ ASSERT_EQ(0U, h264_parameters->packetization_mode);
+ ASSERT_FALSE(static_cast<bool>(h264_parameters->level_asymmetry_allowed));
+ ASSERT_EQ(0U, h264_parameters->max_mbps);
+ ASSERT_EQ(0U, h264_parameters->max_fs);
+ ASSERT_EQ(0U, h264_parameters->max_cpb);
+ ASSERT_EQ(0U, h264_parameters->max_dpb);
+ ASSERT_EQ(0U, h264_parameters->max_br);
+
+ ASSERT_EQ("98", video_format_params[1].format);
+ ASSERT_TRUE(!!video_format_params[1].parameters);
+ ASSERT_EQ(SdpRtpmapAttributeList::kH264,
+ video_format_params[1].parameters->codec_type);
+ h264_parameters =
+ static_cast<SdpFmtpAttributeList::H264Parameters*>(
+ video_format_params[1].parameters.get());
+ ASSERT_EQ((uint32_t)0x42a00d, h264_parameters->profile_level_id);
+ ASSERT_EQ(1U, h264_parameters->packetization_mode);
+ ASSERT_TRUE(static_cast<bool>(h264_parameters->level_asymmetry_allowed));
+ ASSERT_EQ(42000U, h264_parameters->max_mbps);
+ ASSERT_EQ(1400U, h264_parameters->max_fs);
+ ASSERT_EQ(1000U, h264_parameters->max_cpb);
+ ASSERT_EQ(1000U, h264_parameters->max_dpb);
+ ASSERT_EQ(180000U, h264_parameters->max_br);
+
+ ASSERT_EQ("120", video_format_params[2].format);
+ ASSERT_TRUE(!!video_format_params[2].parameters);
+ ASSERT_EQ(SdpRtpmapAttributeList::kVP8,
+ video_format_params[2].parameters->codec_type);
+ const SdpFmtpAttributeList::VP8Parameters *vp8_parameters =
+ static_cast<SdpFmtpAttributeList::VP8Parameters*>(
+ video_format_params[2].parameters.get());
+ ASSERT_EQ(3601U, vp8_parameters->max_fs);
+ ASSERT_EQ(31U, vp8_parameters->max_fr);
+
+ // PCMU-only section carries no fmtp at all
+ ASSERT_FALSE(mSdp->GetMediaSection(2).GetAttributeList().HasAttribute(
+ SdpAttribute::kFmtpAttribute));
+}
+
+// a=ptime is exposed where present (section 0) and absent where the offer
+// has none (section 1).
+TEST_P(NewSdpTest, CheckPtime) {
+ ParseSdp(kBasicAudioVideoOffer);
+ ASSERT_EQ(20U, mSdp->GetMediaSection(0).GetAttributeList().GetPtime());
+ ASSERT_FALSE(mSdp->GetMediaSection(1).GetAttributeList().HasAttribute(
+ SdpAttribute::kPtimeAttribute));
+}
+
+// Value-less "flag" attributes: ice-lite stays session-level only, and
+// rtcp-mux / bundle-only / end-of-candidates are reported per-section
+// exactly where kBasicAudioVideoOffer places them.
+TEST_P(NewSdpTest, CheckFlags) {
+ ParseSdp(kBasicAudioVideoOffer);
+ // ice-lite: present at session level, never reflected into m-sections
+ ASSERT_TRUE(mSdp->GetAttributeList().HasAttribute(
+ SdpAttribute::kIceLiteAttribute));
+ ASSERT_FALSE(mSdp->GetMediaSection(0).GetAttributeList().HasAttribute(
+ SdpAttribute::kIceLiteAttribute));
+ ASSERT_FALSE(mSdp->GetMediaSection(1).GetAttributeList().HasAttribute(
+ SdpAttribute::kIceLiteAttribute));
+ ASSERT_FALSE(mSdp->GetMediaSection(2).GetAttributeList().HasAttribute(
+ SdpAttribute::kIceLiteAttribute));
+
+ ASSERT_TRUE(mSdp->GetMediaSection(0).GetAttributeList().HasAttribute(
+ SdpAttribute::kRtcpMuxAttribute));
+ ASSERT_FALSE(mSdp->GetMediaSection(2).GetAttributeList().HasAttribute(
+ SdpAttribute::kRtcpMuxAttribute));
+
+ ASSERT_FALSE(mSdp->GetMediaSection(0).GetAttributeList().HasAttribute(
+ SdpAttribute::kBundleOnlyAttribute));
+ ASSERT_TRUE(mSdp->GetMediaSection(2).GetAttributeList().HasAttribute(
+ SdpAttribute::kBundleOnlyAttribute));
+
+ ASSERT_TRUE(mSdp->GetMediaSection(0).GetAttributeList().HasAttribute(
+ SdpAttribute::kEndOfCandidatesAttribute));
+ ASSERT_TRUE(mSdp->GetMediaSection(1).GetAttributeList().HasAttribute(
+ SdpAttribute::kEndOfCandidatesAttribute));
+ ASSERT_FALSE(mSdp->GetMediaSection(2).GetAttributeList().HasAttribute(
+ SdpAttribute::kEndOfCandidatesAttribute));
+}
+
+// c= line resolution: media-level c= (IPv4 and IPv6 forms) overrides, and a
+// section without its own c= inherits the session-level multicast address
+// including its TTL and address count.
+TEST_P(NewSdpTest, CheckConnectionLines) {
+ ParseSdp(kBasicAudioVideoOffer);
+ ASSERT_TRUE(!!mSdp) << "Parse failed: " << GetParseErrors();
+ ASSERT_EQ(3U, mSdp->GetMediaSectionCount())
+ << "Wrong number of media sections";
+
+ const SdpConnection& conn1 = mSdp->GetMediaSection(0).GetConnection();
+ ASSERT_EQ(sdp::kIPv4, conn1.GetAddrType());
+ ASSERT_EQ("0.0.0.0", conn1.GetAddress());
+ ASSERT_EQ(0U, conn1.GetTtl());
+ ASSERT_EQ(0U, conn1.GetCount());
+
+ const SdpConnection& conn2 = mSdp->GetMediaSection(1).GetConnection();
+ ASSERT_EQ(sdp::kIPv6, conn2.GetAddrType());
+ ASSERT_EQ("::1", conn2.GetAddress());
+ ASSERT_EQ(0U, conn2.GetTtl());
+ ASSERT_EQ(0U, conn2.GetCount());
+
+ // tests that we can fall through to session level as appropriate
+ const SdpConnection& conn3 = mSdp->GetMediaSection(2).GetConnection();
+ ASSERT_EQ(sdp::kIPv4, conn3.GetAddrType());
+ ASSERT_EQ("224.0.0.1", conn3.GetAddress());
+ ASSERT_EQ(100U, conn3.GetTtl());
+ ASSERT_EQ(12U, conn3.GetCount());
+}
+
+// Per-section direction attributes: explicit sendonly/recvonly are reported,
+// and a section with no direction line reports the sendrecv default.
+TEST_P(NewSdpTest, CheckDirections) {
+ ParseSdp(kBasicAudioVideoOffer);
+
+ ASSERT_TRUE(!!mSdp) << "Parse failed: " << GetParseErrors();
+ ASSERT_EQ(SdpDirectionAttribute::kSendonly,
+ mSdp->GetMediaSection(0).GetAttributeList().GetDirection());
+ ASSERT_EQ(SdpDirectionAttribute::kRecvonly,
+ mSdp->GetMediaSection(1).GetAttributeList().GetDirection());
+ ASSERT_EQ(SdpDirectionAttribute::kSendrecv,
+ mSdp->GetMediaSection(2).GetAttributeList().GetDirection());
+}
+
+// a=candidate lines must be preserved verbatim and in offer order for each
+// m-section; the third section (no candidates) must report none.
+TEST_P(NewSdpTest, CheckCandidates) {
+ ParseSdp(kBasicAudioVideoOffer);
+ ASSERT_TRUE(!!mSdp) << "Parse failed: " << GetParseErrors();
+ ASSERT_EQ(3U, mSdp->GetMediaSectionCount()) << "Wrong number of media sections";
+
+ ASSERT_TRUE(mSdp->GetMediaSection(0).GetAttributeList().HasAttribute(
+ SdpAttribute::kCandidateAttribute));
+ auto audio_candidates =
+ mSdp->GetMediaSection(0).GetAttributeList().GetCandidate();
+ ASSERT_EQ(8U, audio_candidates.size());
+ ASSERT_EQ("0 1 UDP 2130379007 10.0.0.36 62453 typ host", audio_candidates[0]);
+ ASSERT_EQ("2 1 UDP 1694236671 24.6.134.204 62453 typ srflx raddr 10.0.0.36 rport 62453", audio_candidates[1]);
+ ASSERT_EQ("3 1 UDP 100401151 162.222.183.171 49761 typ relay raddr 162.222.183.171 rport 49761", audio_candidates[2]);
+ ASSERT_EQ("6 1 UDP 16515071 162.222.183.171 51858 typ relay raddr 162.222.183.171 rport 51858", audio_candidates[3]);
+ ASSERT_EQ("3 2 UDP 100401150 162.222.183.171 62454 typ relay raddr 162.222.183.171 rport 62454", audio_candidates[4]);
+ ASSERT_EQ("2 2 UDP 1694236670 24.6.134.204 55428 typ srflx raddr 10.0.0.36 rport 55428", audio_candidates[5]);
+ ASSERT_EQ("6 2 UDP 16515070 162.222.183.171 50340 typ relay raddr 162.222.183.171 rport 50340", audio_candidates[6]);
+ ASSERT_EQ("0 2 UDP 2130379006 10.0.0.36 55428 typ host", audio_candidates[7]);
+
+ ASSERT_TRUE(mSdp->GetMediaSection(1).GetAttributeList().HasAttribute(
+ SdpAttribute::kCandidateAttribute));
+ auto video_candidates =
+ mSdp->GetMediaSection(1).GetAttributeList().GetCandidate();
+ ASSERT_EQ(8U, video_candidates.size());
+ ASSERT_EQ("0 1 UDP 2130379007 10.0.0.36 59530 typ host", video_candidates[0]);
+ ASSERT_EQ("0 2 UDP 2130379006 10.0.0.36 64378 typ host", video_candidates[1]);
+ ASSERT_EQ("2 2 UDP 1694236670 24.6.134.204 64378 typ srflx raddr 10.0.0.36 rport 64378", video_candidates[2]);
+ ASSERT_EQ("6 2 UDP 16515070 162.222.183.171 64941 typ relay raddr 162.222.183.171 rport 64941", video_candidates[3]);
+ ASSERT_EQ("6 1 UDP 16515071 162.222.183.171 64800 typ relay raddr 162.222.183.171 rport 64800", video_candidates[4]);
+ ASSERT_EQ("2 1 UDP 1694236671 24.6.134.204 59530 typ srflx raddr 10.0.0.36 rport 59530", video_candidates[5]);
+ ASSERT_EQ("3 1 UDP 100401151 162.222.183.171 62935 typ relay raddr 162.222.183.171 rport 62935", video_candidates[6]);
+ ASSERT_EQ("3 2 UDP 100401150 162.222.183.171 61026 typ relay raddr 162.222.183.171 rport 61026", video_candidates[7]);
+
+ ASSERT_FALSE(mSdp->GetMediaSection(2).GetAttributeList().HasAttribute(
+ SdpAttribute::kCandidateAttribute));
+}
+
+// a=mid values are associated with the correct m-sections, in order.
+TEST_P(NewSdpTest, CheckMid) {
+ ParseSdp(kBasicAudioVideoOffer);
+ ASSERT_EQ("first", mSdp->GetMediaSection(0).GetAttributeList().GetMid());
+ ASSERT_EQ("second", mSdp->GetMediaSection(1).GetAttributeList().GetMid());
+ ASSERT_EQ("third", mSdp->GetMediaSection(2).GetAttributeList().GetMid());
+}
+
+// a=msid-semantic at session level (two semantics with their stream ids) and
+// a=msid per m-section, including multiple msids on one section and an msid
+// with no appdata token.
+TEST_P(NewSdpTest, CheckMsid) {
+ ParseSdp(kBasicAudioVideoOffer);
+ ASSERT_TRUE(mSdp->GetAttributeList().HasAttribute(
+ SdpAttribute::kMsidSemanticAttribute));
+ auto semantics = mSdp->GetAttributeList().GetMsidSemantic().mMsidSemantics;
+ ASSERT_EQ(2U, semantics.size());
+ ASSERT_EQ("WMS", semantics[0].semantic);
+ ASSERT_EQ(2U, semantics[0].msids.size());
+ ASSERT_EQ("stream", semantics[0].msids[0]);
+ ASSERT_EQ("streama", semantics[0].msids[1]);
+ ASSERT_EQ("foo", semantics[1].semantic);
+ ASSERT_EQ(1U, semantics[1].msids.size());
+ ASSERT_EQ("stream", semantics[1].msids[0]);
+
+
+ const SdpMsidAttributeList& msids1 =
+ mSdp->GetMediaSection(0).GetAttributeList().GetMsid();
+ ASSERT_EQ(1U, msids1.mMsids.size());
+ ASSERT_EQ("stream", msids1.mMsids[0].identifier);
+ ASSERT_EQ("track", msids1.mMsids[0].appdata);
+ const SdpMsidAttributeList& msids2 =
+ mSdp->GetMediaSection(1).GetAttributeList().GetMsid();
+ ASSERT_EQ(2U, msids2.mMsids.size());
+ ASSERT_EQ("streama", msids2.mMsids[0].identifier);
+ ASSERT_EQ("tracka", msids2.mMsids[0].appdata);
+ ASSERT_EQ("streamb", msids2.mMsids[1].identifier);
+ ASSERT_EQ("trackb", msids2.mMsids[1].appdata);
+ const SdpMsidAttributeList& msids3 =
+ mSdp->GetMediaSection(2).GetAttributeList().GetMsid();
+ ASSERT_EQ(1U, msids3.mMsids.size());
+ ASSERT_EQ("noappdata", msids3.mMsids[0].identifier);
+ // missing appdata parses as the empty string, not a failure
+ ASSERT_EQ("", msids3.mMsids[0].appdata);
+}
+
+// a=rid appears only on the video section; its id, direction, payload-type
+// format list, and max-width/max-height constraints must all be parsed.
+TEST_P(NewSdpTest, CheckRid)
+{
+ ParseSdp(kBasicAudioVideoOffer);
+ ASSERT_TRUE(!!mSdp);
+ ASSERT_EQ(3U, mSdp->GetMediaSectionCount()) << "Wrong number of media sections";
+
+ ASSERT_FALSE(mSdp->GetAttributeList().HasAttribute(
+ SdpAttribute::kRidAttribute));
+ ASSERT_FALSE(mSdp->GetMediaSection(0).GetAttributeList().HasAttribute(
+ SdpAttribute::kRidAttribute));
+ ASSERT_TRUE(mSdp->GetMediaSection(1).GetAttributeList().HasAttribute(
+ SdpAttribute::kRidAttribute));
+ ASSERT_FALSE(mSdp->GetMediaSection(2).GetAttributeList().HasAttribute(
+ SdpAttribute::kRidAttribute));
+
+ const SdpRidAttributeList& rids =
+ mSdp->GetMediaSection(1).GetAttributeList().GetRid();
+
+ ASSERT_EQ(1U, rids.mRids.size());
+ ASSERT_EQ("bar", rids.mRids[0].id);
+ ASSERT_EQ(sdp::kRecv, rids.mRids[0].direction);
+ ASSERT_EQ(1U, rids.mRids[0].formats.size());
+ ASSERT_EQ(96U, rids.mRids[0].formats[0]);
+ ASSERT_EQ(800U, rids.mRids[0].constraints.maxWidth);
+ ASSERT_EQ(600U, rids.mRids[0].constraints.maxHeight);
+}
+
+// Media-level a=ice-ufrag overrides the session-level value. HasAttribute's
+// second argument selects section-local-only (true) vs. session-fallback
+// (false) lookup; GetIceUfrag returns the effective value either way.
+TEST_P(NewSdpTest, CheckMediaLevelIceUfrag) {
+ ParseSdp(kBasicAudioVideoOffer);
+ ASSERT_TRUE(!!mSdp) << "Parse failed: " << GetParseErrors();
+ ASSERT_EQ(3U, mSdp->GetMediaSectionCount()) << "Wrong number of media sections";
+
+ ASSERT_TRUE(mSdp->GetMediaSection(0).GetAttributeList().HasAttribute(
+ SdpAttribute::kIceUfragAttribute, true));
+ ASSERT_EQ("00000000",
+ mSdp->GetMediaSection(0).GetAttributeList().GetIceUfrag());
+
+ ASSERT_TRUE(mSdp->GetMediaSection(0).GetAttributeList().HasAttribute(
+ SdpAttribute::kIceUfragAttribute, false));
+
+ // section 1 has no local ice-ufrag; it inherits the session-level one
+ ASSERT_TRUE(mSdp->GetMediaSection(1).GetAttributeList().HasAttribute(
+ SdpAttribute::kIceUfragAttribute, true));
+ ASSERT_EQ("4a799b2e",
+ mSdp->GetMediaSection(1).GetAttributeList().GetIceUfrag());
+}
+
+// Same as CheckMediaLevelIceUfrag but for a=ice-pwd: media-level override on
+// section 0, session-level fallback on section 1.
+TEST_P(NewSdpTest, CheckMediaLevelIcePwd) {
+ ParseSdp(kBasicAudioVideoOffer);
+ ASSERT_TRUE(!!mSdp) << "Parse failed: " << GetParseErrors();
+ ASSERT_EQ(3U, mSdp->GetMediaSectionCount()) << "Wrong number of media sections";
+
+ ASSERT_TRUE(mSdp->GetMediaSection(0).GetAttributeList().HasAttribute(
+ SdpAttribute::kIcePwdAttribute));
+ ASSERT_EQ("0000000000000000000000000000000",
+ mSdp->GetMediaSection(0).GetAttributeList().GetIcePwd());
+
+ ASSERT_TRUE(mSdp->GetMediaSection(1).GetAttributeList().HasAttribute(
+ SdpAttribute::kIcePwdAttribute));
+ ASSERT_EQ("e4cc12a910f106a0a744719425510e17",
+ mSdp->GetMediaSection(1).GetAttributeList().GetIcePwd());
+}
+
+// Multiple a=group lines: two BUNDLE groups plus an LS group, each with its
+// semantics enum and ordered mid-tag list.
+TEST_P(NewSdpTest, CheckGroups) {
+ ParseSdp(kBasicAudioVideoOffer);
+ const SdpGroupAttributeList& group = mSdp->GetAttributeList().GetGroup();
+ const SdpGroupAttributeList::Group& group1 = group.mGroups[0];
+ ASSERT_EQ(SdpGroupAttributeList::kBundle, group1.semantics);
+ ASSERT_EQ(2U, group1.tags.size());
+ ASSERT_EQ("first", group1.tags[0]);
+ ASSERT_EQ("second", group1.tags[1]);
+
+ const SdpGroupAttributeList::Group& group2 = group.mGroups[1];
+ ASSERT_EQ(SdpGroupAttributeList::kBundle, group2.semantics);
+ ASSERT_EQ(1U, group2.tags.size());
+ ASSERT_EQ("third", group2.tags[0]);
+
+ const SdpGroupAttributeList::Group& group3 = group.mGroups[2];
+ ASSERT_EQ(SdpGroupAttributeList::kLs, group3.semantics);
+ ASSERT_EQ(2U, group3.tags.size());
+ ASSERT_EQ("first", group3.tags[0]);
+ ASSERT_EQ("third", group3.tags[1]);
+}
+
+// SDP from a basic A/V call with data channel, Firefox to Firefox ("FFX").
+// Exercises extmap variants (plain, with direction, with params), the full
+// rtcp-fb matrix on the video section (including entries marked "Should be
+// ignored"), and a DTLS/SCTP application section with a=sctpmap.
+const std::string kBasicAudioVideoDataOffer =
+"v=0" CRLF
+"o=Mozilla-SIPUA-35.0a1 27987 0 IN IP4 0.0.0.0" CRLF
+"s=SIP Call" CRLF
+"t=0 0" CRLF
+"a=ice-ufrag:8a39d2ae" CRLF
+"a=ice-pwd:601d53aba51a318351b3ecf5ee00048f" CRLF
+"a=fingerprint:sha-256 30:FF:8E:2B:AC:9D:ED:70:18:10:67:C8:AE:9E:68:F3:86:53:51:B0:AC:31:B7:BE:6D:CF:A4:2E:D3:6E:B4:28" CRLF
+"m=audio 9 RTP/SAVPF 109 9 0 8 101" CRLF
+"c=IN IP4 0.0.0.0" CRLF
+"a=rtpmap:109 opus/48000/2" CRLF
+"a=ptime:20" CRLF
+"a=rtpmap:9 G722/8000" CRLF
+"a=rtpmap:0 PCMU/8000" CRLF
+"a=rtpmap:8 PCMA/8000" CRLF
+"a=rtpmap:101 telephone-event/8000" CRLF
+"a=fmtp:101 0-15" CRLF
+"a=sendrecv" CRLF
+"a=extmap:1 urn:ietf:params:rtp-hdrext:ssrc-audio-level" CRLF
+"a=extmap:2/sendonly some_extension" CRLF
+"a=extmap:3 some_other_extension some_params some more params" CRLF
+"a=setup:actpass" CRLF
+"a=rtcp-mux" CRLF
+"m=video 9 RTP/SAVPF 120 126 97" CRLF
+"c=IN IP4 0.0.0.0" CRLF
+"a=rtpmap:120 VP8/90000" CRLF
+"a=rtpmap:126 H264/90000" CRLF
+"a=fmtp:126 profile-level-id=42e01f;packetization-mode=1" CRLF
+"a=rtpmap:97 H264/90000" CRLF
+"a=fmtp:97 profile-level-id=42e01f" CRLF
+"a=sendrecv" CRLF
+// sipcc barfs on this, despite that it is valid syntax
+// Do we care about fixing?
+//"a=rtcp-fb:120 ack" CRLF // Should be ignored by sipcc
+"a=rtcp-fb:120 ack rpsi" CRLF
+"a=rtcp-fb:120 ack app foo" CRLF
+"a=rtcp-fb:120 ack foo" CRLF // Should be ignored
+"a=rtcp-fb:120 nack" CRLF
+"a=rtcp-fb:120 nack sli" CRLF
+"a=rtcp-fb:120 nack pli" CRLF
+"a=rtcp-fb:120 nack rpsi" CRLF
+"a=rtcp-fb:120 nack app foo" CRLF
+"a=rtcp-fb:120 nack foo" CRLF // Should be ignored
+"a=rtcp-fb:120 ccm fir" CRLF
+"a=rtcp-fb:120 ccm tmmbr" CRLF
+"a=rtcp-fb:120 ccm tstr" CRLF
+"a=rtcp-fb:120 ccm vbcm" CRLF
+"a=rtcp-fb:120 ccm foo" CRLF // Should be ignored
+"a=rtcp-fb:120 trr-int 10" CRLF
+"a=rtcp-fb:120 goog-remb" CRLF
+"a=rtcp-fb:120 foo" CRLF // Should be ignored
+"a=rtcp-fb:126 nack" CRLF
+"a=rtcp-fb:126 nack pli" CRLF
+"a=rtcp-fb:126 ccm fir" CRLF
+"a=rtcp-fb:97 nack" CRLF
+"a=rtcp-fb:97 nack pli" CRLF
+"a=rtcp-fb:97 ccm fir" CRLF
+"a=rtcp-fb:* ccm tmmbr" CRLF
+"a=setup:actpass" CRLF
+"a=rtcp-mux" CRLF
+"m=application 9 DTLS/SCTP 5000" CRLF
+"c=IN IP4 0.0.0.0" CRLF
+"a=sctpmap:5000 webrtc-datachannel 16" CRLF
+"a=setup:actpass" CRLF;
+
+// Smoke test: the audio/video/datachannel offer parses without any errors.
+TEST_P(NewSdpTest, BasicAudioVideoDataSdpParse) {
+ ParseSdp(kBasicAudioVideoDataOffer);
+ ASSERT_EQ(0U, mParser.GetParseErrors().size()) <<
+ "Got parse errors: " << GetParseErrors();
+}
+
+// The m=application DTLS/SCTP section: media type, protocol enum, single
+// "5000" format, its own c= line, and a=setup:actpass.
+TEST_P(NewSdpTest, CheckApplicationParameters) {
+ ParseSdp(kBasicAudioVideoDataOffer);
+ ASSERT_TRUE(!!mSdp);
+ ASSERT_EQ(3U, mSdp->GetMediaSectionCount()) << "Wrong number of media sections";
+ ASSERT_EQ(SdpMediaSection::kAudio, mSdp->GetMediaSection(0).GetMediaType())
+ << "Wrong type for first media section";
+ ASSERT_EQ(SdpMediaSection::kVideo, mSdp->GetMediaSection(1).GetMediaType())
+ << "Wrong type for second media section";
+ ASSERT_EQ(SdpMediaSection::kApplication, mSdp->GetMediaSection(2).GetMediaType())
+ << "Wrong type for third media section";
+
+ ASSERT_EQ(SdpMediaSection::kDtlsSctp,
+ mSdp->GetMediaSection(2).GetProtocol())
+ << "Wrong protocol for application";
+ auto app_formats = mSdp->GetMediaSection(2).GetFormats();
+ // NOTE(review): failure message says "audio" but this is the application
+ // section; cannot be corrected inside this patch hunk
+ ASSERT_EQ(1U, app_formats.size()) << "Wrong number of formats for audio";
+ ASSERT_EQ("5000", app_formats[0]);
+
+ const SdpConnection& conn3 = mSdp->GetMediaSection(2).GetConnection();
+ ASSERT_EQ(sdp::kIPv4, conn3.GetAddrType());
+ ASSERT_EQ("0.0.0.0", conn3.GetAddress());
+ ASSERT_EQ(0U, conn3.GetTtl());
+ ASSERT_EQ(0U, conn3.GetCount());
+
+ ASSERT_TRUE(mSdp->GetMediaSection(2).GetAttributeList().HasAttribute(
+ SdpAttribute::kSetupAttribute));
+ ASSERT_EQ(SdpSetupAttribute::kActpass,
+ mSdp->GetMediaSection(2).GetAttributeList().GetSetup().mRole);
+}
+
+// a=extmap parsing: plain entry, entry with "/sendonly" direction suffix,
+// and entry with trailing extension attributes.
+TEST_P(NewSdpTest, CheckExtmap) {
+ ParseSdp(kBasicAudioVideoDataOffer);
+ ASSERT_TRUE(!!mSdp);
+ ASSERT_EQ(3U, mSdp->GetMediaSectionCount()) << "Wrong number of media sections";
+
+ ASSERT_TRUE(mSdp->GetMediaSection(0).GetAttributeList().HasAttribute(
+ SdpAttribute::kExtmapAttribute));
+
+ auto extmaps =
+ mSdp->GetMediaSection(0).GetAttributeList().GetExtmap().mExtmaps;
+ ASSERT_EQ(3U, extmaps.size());
+
+ // "a=extmap:1 urn:...:ssrc-audio-level" — no direction, no attributes
+ ASSERT_EQ(1U, extmaps[0].entry);
+ ASSERT_FALSE(extmaps[0].direction_specified);
+ ASSERT_EQ("urn:ietf:params:rtp-hdrext:ssrc-audio-level",
+ extmaps[0].extensionname);
+ ASSERT_EQ("",
+ extmaps[0].extensionattributes);
+
+ // "a=extmap:2/sendonly some_extension" — direction suffix parsed out
+ ASSERT_EQ(2U, extmaps[1].entry);
+ ASSERT_TRUE(extmaps[1].direction_specified);
+ ASSERT_EQ(SdpDirectionAttribute::kSendonly, extmaps[1].direction);
+ ASSERT_EQ("some_extension",
+ extmaps[1].extensionname);
+ ASSERT_EQ("",
+ extmaps[1].extensionattributes);
+
+ // "a=extmap:3 some_other_extension some_params some more params" —
+ // everything after the name is kept verbatim as attributes
+ ASSERT_EQ(3U, extmaps[2].entry);
+ ASSERT_FALSE(extmaps[2].direction_specified);
+ ASSERT_EQ("some_other_extension",
+ extmaps[2].extensionname);
+ ASSERT_EQ("some_params some more params",
+ extmaps[2].extensionattributes);
+}
+
+// Every recognized a=rtcp-fb entry on the video section survives parsing,
+// in order; the 24 offered lines collapse to 20 because the entries the
+// fixture marks "Should be ignored" are dropped. The wildcard "*" payload
+// type is preserved as-is.
+TEST_P(NewSdpTest, CheckRtcpFb) {
+ ParseSdp(kBasicAudioVideoDataOffer);
+ ASSERT_TRUE(!!mSdp);
+ ASSERT_EQ(3U, mSdp->GetMediaSectionCount()) << "Wrong number of media sections";
+
+ auto& video_attrs = mSdp->GetMediaSection(1).GetAttributeList();
+ ASSERT_TRUE(video_attrs.HasAttribute(SdpAttribute::kRtcpFbAttribute));
+ auto& rtcpfbs = video_attrs.GetRtcpFb().mFeedbacks;
+ ASSERT_EQ(20U, rtcpfbs.size());
+ CheckRtcpFb(rtcpfbs[0], "120", SdpRtcpFbAttributeList::kAck, "rpsi");
+ CheckRtcpFb(rtcpfbs[1], "120", SdpRtcpFbAttributeList::kAck, "app", "foo");
+ CheckRtcpFb(rtcpfbs[2], "120", SdpRtcpFbAttributeList::kNack, "");
+ CheckRtcpFb(rtcpfbs[3], "120", SdpRtcpFbAttributeList::kNack, "sli");
+ CheckRtcpFb(rtcpfbs[4], "120", SdpRtcpFbAttributeList::kNack, "pli");
+ CheckRtcpFb(rtcpfbs[5], "120", SdpRtcpFbAttributeList::kNack, "rpsi");
+ CheckRtcpFb(rtcpfbs[6], "120", SdpRtcpFbAttributeList::kNack, "app", "foo");
+ CheckRtcpFb(rtcpfbs[7], "120", SdpRtcpFbAttributeList::kCcm, "fir");
+ CheckRtcpFb(rtcpfbs[8], "120", SdpRtcpFbAttributeList::kCcm, "tmmbr");
+ CheckRtcpFb(rtcpfbs[9], "120", SdpRtcpFbAttributeList::kCcm, "tstr");
+ CheckRtcpFb(rtcpfbs[10], "120", SdpRtcpFbAttributeList::kCcm, "vbcm");
+ CheckRtcpFb(rtcpfbs[11], "120", SdpRtcpFbAttributeList::kTrrInt, "10");
+ CheckRtcpFb(rtcpfbs[12], "120", SdpRtcpFbAttributeList::kRemb, "");
+ CheckRtcpFb(rtcpfbs[13], "126", SdpRtcpFbAttributeList::kNack, "");
+ CheckRtcpFb(rtcpfbs[14], "126", SdpRtcpFbAttributeList::kNack, "pli");
+ CheckRtcpFb(rtcpfbs[15], "126", SdpRtcpFbAttributeList::kCcm, "fir");
+ CheckRtcpFb(rtcpfbs[16], "97", SdpRtcpFbAttributeList::kNack, "");
+ CheckRtcpFb(rtcpfbs[17], "97", SdpRtcpFbAttributeList::kNack, "pli");
+ CheckRtcpFb(rtcpfbs[18], "97", SdpRtcpFbAttributeList::kCcm, "fir");
+ CheckRtcpFb(rtcpfbs[19], "*", SdpRtcpFbAttributeList::kCcm, "tmmbr");
+}
+
+// a=rtcp parsing: full form (port + nettype + addrtype + address) on section
+// 0, port-only form (empty address) on section 1, absent elsewhere.
+TEST_P(NewSdpTest, CheckRtcp) {
+ ParseSdp(kBasicAudioVideoOffer);
+ ASSERT_TRUE(!!mSdp);
+ ASSERT_EQ(3U, mSdp->GetMediaSectionCount()) << "Wrong number of media sections";
+
+ ASSERT_FALSE(mSdp->GetAttributeList().HasAttribute(
+ SdpAttribute::kRtcpAttribute));
+ ASSERT_TRUE(mSdp->GetMediaSection(0).GetAttributeList().HasAttribute(
+ SdpAttribute::kRtcpAttribute));
+ ASSERT_TRUE(mSdp->GetMediaSection(1).GetAttributeList().HasAttribute(
+ SdpAttribute::kRtcpAttribute));
+ ASSERT_FALSE(mSdp->GetMediaSection(2).GetAttributeList().HasAttribute(
+ SdpAttribute::kRtcpAttribute));
+
+ auto& rtcpAttr_0 = mSdp->GetMediaSection(0).GetAttributeList().GetRtcp();
+ ASSERT_EQ(62454U, rtcpAttr_0.mPort);
+ ASSERT_EQ(sdp::kInternet, rtcpAttr_0.mNetType);
+ ASSERT_EQ(sdp::kIPv4, rtcpAttr_0.mAddrType);
+ ASSERT_EQ("162.222.183.171", rtcpAttr_0.mAddress);
+
+ auto& rtcpAttr_1 = mSdp->GetMediaSection(1).GetAttributeList().GetRtcp();
+ ASSERT_EQ(61026U, rtcpAttr_1.mPort);
+ ASSERT_EQ("", rtcpAttr_1.mAddress);
+}
+
+// a=imageattr on the video section only: one entry with send/recv "*"
+// wildcards, and one with explicit 640x480 send and recv sets.
+TEST_P(NewSdpTest, CheckImageattr)
+{
+ ParseSdp(kBasicAudioVideoOffer);
+ ASSERT_TRUE(!!mSdp);
+ ASSERT_EQ(3U, mSdp->GetMediaSectionCount()) << "Wrong number of media sections";
+
+ ASSERT_FALSE(mSdp->GetAttributeList().HasAttribute(
+ SdpAttribute::kImageattrAttribute));
+ ASSERT_FALSE(mSdp->GetMediaSection(0).GetAttributeList().HasAttribute(
+ SdpAttribute::kImageattrAttribute));
+ ASSERT_TRUE(mSdp->GetMediaSection(1).GetAttributeList().HasAttribute(
+ SdpAttribute::kImageattrAttribute));
+ ASSERT_FALSE(mSdp->GetMediaSection(2).GetAttributeList().HasAttribute(
+ SdpAttribute::kImageattrAttribute));
+
+ const SdpImageattrAttributeList& imageattrs =
+ mSdp->GetMediaSection(1).GetAttributeList().GetImageattr();
+
+ ASSERT_EQ(2U, imageattrs.mImageattrs.size());
+ // payload 120: wildcard ("*") in both directions
+ const SdpImageattrAttributeList::Imageattr& imageattr_0(
+ imageattrs.mImageattrs[0]);
+ ASSERT_TRUE(imageattr_0.pt.isSome());
+ ASSERT_EQ(120U, *imageattr_0.pt);
+ ASSERT_TRUE(imageattr_0.sendAll);
+ ASSERT_TRUE(imageattr_0.recvAll);
+
+ // payload 121: one discrete 640x480 set for each direction
+ const SdpImageattrAttributeList::Imageattr& imageattr_1(
+ imageattrs.mImageattrs[1]);
+ ASSERT_TRUE(imageattr_1.pt.isSome());
+ ASSERT_EQ(121U, *imageattr_1.pt);
+ ASSERT_FALSE(imageattr_1.sendAll);
+ ASSERT_FALSE(imageattr_1.recvAll);
+ ASSERT_EQ(1U, imageattr_1.sendSets.size());
+ ASSERT_EQ(1U, imageattr_1.sendSets[0].xRange.discreteValues.size());
+ ASSERT_EQ(640U, imageattr_1.sendSets[0].xRange.discreteValues.front());
+ ASSERT_EQ(1U, imageattr_1.sendSets[0].yRange.discreteValues.size());
+ ASSERT_EQ(480U, imageattr_1.sendSets[0].yRange.discreteValues.front());
+ ASSERT_EQ(1U, imageattr_1.recvSets.size());
+ ASSERT_EQ(1U, imageattr_1.recvSets[0].xRange.discreteValues.size());
+ ASSERT_EQ(640U, imageattr_1.recvSets[0].xRange.discreteValues.front());
+ ASSERT_EQ(1U, imageattr_1.recvSets[0].yRange.discreteValues.size());
+ ASSERT_EQ(480U, imageattr_1.recvSets[0].yRange.discreteValues.front());
+}
+
+// a=simulcast on the video section only: two recv versions (payload types
+// 120 and 121), no send versions, version type reported as kPt.
+TEST_P(NewSdpTest, CheckSimulcast)
+{
+ ParseSdp(kBasicAudioVideoOffer);
+ ASSERT_TRUE(!!mSdp);
+ ASSERT_EQ(3U, mSdp->GetMediaSectionCount()) << "Wrong number of media sections";
+
+ ASSERT_FALSE(mSdp->GetAttributeList().HasAttribute(
+ SdpAttribute::kSimulcastAttribute));
+ ASSERT_FALSE(mSdp->GetMediaSection(0).GetAttributeList().HasAttribute(
+ SdpAttribute::kSimulcastAttribute));
+ ASSERT_TRUE(mSdp->GetMediaSection(1).GetAttributeList().HasAttribute(
+ SdpAttribute::kSimulcastAttribute));
+ ASSERT_FALSE(mSdp->GetMediaSection(2).GetAttributeList().HasAttribute(
+ SdpAttribute::kSimulcastAttribute));
+
+ const SdpSimulcastAttribute& simulcast =
+ mSdp->GetMediaSection(1).GetAttributeList().GetSimulcast();
+
+ ASSERT_EQ(2U, simulcast.recvVersions.size());
+ ASSERT_EQ(0U, simulcast.sendVersions.size());
+ ASSERT_EQ(1U, simulcast.recvVersions[0].choices.size());
+ ASSERT_EQ("120", simulcast.recvVersions[0].choices[0]);
+ ASSERT_EQ(1U, simulcast.recvVersions[1].choices.size());
+ ASSERT_EQ("121", simulcast.recvVersions[1].choices[0]);
+ ASSERT_EQ(SdpSimulcastAttribute::Versions::kPt,
+ simulcast.recvVersions.type);
+}
+
+TEST_P(NewSdpTest, CheckSctpmap) {
+ ParseSdp(kBasicAudioVideoDataOffer);
+ ASSERT_TRUE(!!mSdp) << "Parse failed: " << GetParseErrors();
+ ASSERT_EQ(3U, mSdp->GetMediaSectionCount())
+ << "Wrong number of media sections";
+
+ const SdpMediaSection& appsec = mSdp->GetMediaSection(2);
+ ASSERT_TRUE(
+ appsec.GetAttributeList().HasAttribute(SdpAttribute::kSctpmapAttribute));
+ const SdpSctpmapAttributeList& sctpmap =
+ appsec.GetAttributeList().GetSctpmap();
+
+ ASSERT_EQ(1U, sctpmap.mSctpmaps.size())
+ << "Wrong number of sctpmap attributes";
+ ASSERT_EQ(1U, appsec.GetFormats().size());
+
+ // Need to know name of type
+ CheckSctpmap("5000",
+ "webrtc-datachannel",
+ 16,
+ appsec.GetFormats()[0],
+ sctpmap);
+}
+
+const std::string kNewSctpmapOfferDraft07 =
+"v=0" CRLF
+"o=Mozilla-SIPUA-35.0a1 27987 0 IN IP4 0.0.0.0" CRLF
+"s=SIP Call" CRLF
+"t=0 0" CRLF
+"a=ice-ufrag:8a39d2ae" CRLF
+"a=ice-pwd:601d53aba51a318351b3ecf5ee00048f" CRLF
+"a=fingerprint:sha-256 30:FF:8E:2B:AC:9D:ED:70:18:10:67:C8:AE:9E:68:F3:86:53:51:B0:AC:31:B7:BE:6D:CF:A4:2E:D3:6E:B4:28" CRLF
+"m=application 9 DTLS/SCTP webrtc-datachannel" CRLF
+"c=IN IP4 0.0.0.0" CRLF
+"a=fmtp:webrtc-datachannel max-message-size=100000" CRLF
+"a=sctp-port 5000" CRLF
+"a=setup:actpass" CRLF;
+
+TEST_P(NewSdpTest, NewSctpmapSdpParse) {
+ ParseSdp(kNewSctpmapOfferDraft07, false);
+}
+
+INSTANTIATE_TEST_CASE_P(RoundTripSerialize,
+ NewSdpTest,
+ ::testing::Values(false, true));
+
+const std::string kCandidateInSessionSDP =
+"v=0" CRLF
+"o=Mozilla-SIPUA-35.0a1 5184 0 IN IP4 0.0.0.0" CRLF
+"s=SIP Call" CRLF
+"c=IN IP4 224.0.0.1/100/12" CRLF
+"t=0 0" CRLF
+"a=candidate:0 1 UDP 2130379007 10.0.0.36 62453 typ host" CRLF
+"m=audio 9 RTP/SAVPF 109 9 0 8 101" CRLF
+"c=IN IP4 0.0.0.0" CRLF
+"a=rtpmap:109 opus/48000/2" CRLF;
+
+// This may or may not parse, but if it does, the errant candidate attribute
+// should be ignored.
+TEST_P(NewSdpTest, CheckCandidateInSessionLevel) {
+ ParseSdp(kCandidateInSessionSDP, false);
+ if (mSdp) {
+ ASSERT_FALSE(mSdp->GetMediaSection(0).GetAttributeList().HasAttribute(
+ SdpAttribute::kCandidateAttribute));
+ ASSERT_FALSE(mSdp->GetAttributeList().HasAttribute(
+ SdpAttribute::kCandidateAttribute));
+ }
+}
+
+const std::string kBundleOnlyInSessionSDP =
+"v=0" CRLF
+"o=Mozilla-SIPUA-35.0a1 5184 0 IN IP4 0.0.0.0" CRLF
+"s=SIP Call" CRLF
+"c=IN IP4 224.0.0.1/100/12" CRLF
+"t=0 0" CRLF
+"a=bundle-only" CRLF
+"m=audio 9 RTP/SAVPF 109 9 0 8 101" CRLF
+"c=IN IP4 0.0.0.0" CRLF
+"a=rtpmap:109 opus/48000/2" CRLF;
+
+// This may or may not parse, but if it does, the errant attribute
+// should be ignored.
+TEST_P(NewSdpTest, CheckBundleOnlyInSessionLevel) {
+ ParseSdp(kBundleOnlyInSessionSDP, false);
+ if (mSdp) {
+ ASSERT_FALSE(mSdp->GetMediaSection(0).GetAttributeList().HasAttribute(
+ SdpAttribute::kBundleOnlyAttribute));
+ ASSERT_FALSE(mSdp->GetAttributeList().HasAttribute(
+ SdpAttribute::kBundleOnlyAttribute));
+ }
+}
+
+const std::string kFmtpInSessionSDP =
+"v=0" CRLF
+"o=Mozilla-SIPUA-35.0a1 5184 0 IN IP4 0.0.0.0" CRLF
+"s=SIP Call" CRLF
+"c=IN IP4 224.0.0.1/100/12" CRLF
+"t=0 0" CRLF
+"a=fmtp:109 0-15" CRLF
+"m=audio 9 RTP/SAVPF 109 9 0 8 101" CRLF
+"c=IN IP4 0.0.0.0" CRLF
+"a=rtpmap:109 opus/48000/2" CRLF;
+
+// This may or may not parse, but if it does, the errant attribute
+// should be ignored.
+TEST_P(NewSdpTest, CheckFmtpInSessionLevel) {
+ ParseSdp(kFmtpInSessionSDP, false);
+ if (mSdp) {
+ ASSERT_FALSE(mSdp->GetMediaSection(0).GetAttributeList().HasAttribute(
+ SdpAttribute::kFmtpAttribute));
+ ASSERT_FALSE(mSdp->GetAttributeList().HasAttribute(
+ SdpAttribute::kFmtpAttribute));
+ }
+}
+
+const std::string kIceMismatchInSessionSDP =
+"v=0" CRLF
+"o=Mozilla-SIPUA-35.0a1 5184 0 IN IP4 0.0.0.0" CRLF
+"s=SIP Call" CRLF
+"c=IN IP4 224.0.0.1/100/12" CRLF
+"t=0 0" CRLF
+"a=ice-mismatch" CRLF
+"m=audio 9 RTP/SAVPF 109 9 0 8 101" CRLF
+"c=IN IP4 0.0.0.0" CRLF
+"a=rtpmap:109 opus/48000/2" CRLF;
+
+// This may or may not parse, but if it does, the errant attribute
+// should be ignored.
+TEST_P(NewSdpTest, CheckIceMismatchInSessionLevel) {
+ ParseSdp(kIceMismatchInSessionSDP, false);
+ if (mSdp) {
+ ASSERT_FALSE(mSdp->GetMediaSection(0).GetAttributeList().HasAttribute(
+ SdpAttribute::kIceMismatchAttribute));
+ ASSERT_FALSE(mSdp->GetAttributeList().HasAttribute(
+ SdpAttribute::kIceMismatchAttribute));
+ }
+}
+
+const std::string kImageattrInSessionSDP =
+"v=0" CRLF
+"o=Mozilla-SIPUA-35.0a1 5184 0 IN IP4 0.0.0.0" CRLF
+"s=SIP Call" CRLF
+"c=IN IP4 224.0.0.1/100/12" CRLF
+"t=0 0" CRLF
+"a=imageattr:120 send * recv *" CRLF
+"m=video 9 RTP/SAVPF 120" CRLF
+"c=IN IP4 0.0.0.0" CRLF
+"a=rtpmap:120 VP8/90000" CRLF;
+
+// This may or may not parse, but if it does, the errant attribute
+// should be ignored.
+TEST_P(NewSdpTest, CheckImageattrInSessionLevel) {
+ ParseSdp(kImageattrInSessionSDP, false);
+ if (mSdp) {
+ ASSERT_FALSE(mSdp->GetMediaSection(0).GetAttributeList().HasAttribute(
+ SdpAttribute::kImageattrAttribute));
+ ASSERT_FALSE(mSdp->GetAttributeList().HasAttribute(
+ SdpAttribute::kImageattrAttribute));
+ }
+}
+
+const std::string kLabelInSessionSDP =
+"v=0" CRLF
+"o=Mozilla-SIPUA-35.0a1 5184 0 IN IP4 0.0.0.0" CRLF
+"s=SIP Call" CRLF
+"c=IN IP4 224.0.0.1/100/12" CRLF
+"t=0 0" CRLF
+"a=label:foobar" CRLF
+"m=video 9 RTP/SAVPF 120" CRLF
+"c=IN IP4 0.0.0.0" CRLF
+"a=rtpmap:120 VP8/90000" CRLF;
+
+// This may or may not parse, but if it does, the errant attribute
+// should be ignored.
+TEST_P(NewSdpTest, CheckLabelInSessionLevel) {
+ ParseSdp(kLabelInSessionSDP, false);
+ if (mSdp) {
+ ASSERT_FALSE(mSdp->GetMediaSection(0).GetAttributeList().HasAttribute(
+ SdpAttribute::kLabelAttribute));
+ ASSERT_FALSE(mSdp->GetAttributeList().HasAttribute(
+ SdpAttribute::kLabelAttribute));
+ }
+}
+
+const std::string kMaxptimeInSessionSDP =
+"v=0" CRLF
+"o=Mozilla-SIPUA-35.0a1 5184 0 IN IP4 0.0.0.0" CRLF
+"s=SIP Call" CRLF
+"c=IN IP4 224.0.0.1/100/12" CRLF
+"t=0 0" CRLF
+"a=maxptime:100" CRLF
+"m=video 9 RTP/SAVPF 120" CRLF
+"c=IN IP4 0.0.0.0" CRLF
+"a=rtpmap:120 VP8/90000" CRLF;
+
+// This may or may not parse, but if it does, the errant attribute
+// should be ignored.
+TEST_P(NewSdpTest, CheckMaxptimeInSessionLevel) {
+ ParseSdp(kMaxptimeInSessionSDP, false);
+ if (mSdp) {
+ ASSERT_FALSE(mSdp->GetMediaSection(0).GetAttributeList().HasAttribute(
+ SdpAttribute::kMaxptimeAttribute));
+ ASSERT_FALSE(mSdp->GetAttributeList().HasAttribute(
+ SdpAttribute::kMaxptimeAttribute));
+ }
+}
+
+const std::string kMidInSessionSDP =
+"v=0" CRLF
+"o=Mozilla-SIPUA-35.0a1 5184 0 IN IP4 0.0.0.0" CRLF
+"s=SIP Call" CRLF
+"c=IN IP4 224.0.0.1/100/12" CRLF
+"t=0 0" CRLF
+"a=mid:foobar" CRLF
+"m=video 9 RTP/SAVPF 120" CRLF
+"c=IN IP4 0.0.0.0" CRLF
+"a=rtpmap:120 VP8/90000" CRLF;
+
+// This may or may not parse, but if it does, the errant attribute
+// should be ignored.
+TEST_P(NewSdpTest, CheckMidInSessionLevel) {
+ ParseSdp(kMidInSessionSDP, false);
+ if (mSdp) {
+ ASSERT_FALSE(mSdp->GetMediaSection(0).GetAttributeList().HasAttribute(
+ SdpAttribute::kMidAttribute));
+ ASSERT_FALSE(mSdp->GetAttributeList().HasAttribute(
+ SdpAttribute::kMidAttribute));
+ }
+}
+
+const std::string kMsidInSessionSDP =
+"v=0" CRLF
+"o=Mozilla-SIPUA-35.0a1 5184 0 IN IP4 0.0.0.0" CRLF
+"s=SIP Call" CRLF
+"c=IN IP4 224.0.0.1/100/12" CRLF
+"t=0 0" CRLF
+"a=msid:foobar" CRLF
+"m=video 9 RTP/SAVPF 120" CRLF
+"c=IN IP4 0.0.0.0" CRLF
+"a=rtpmap:120 VP8/90000" CRLF;
+
+// This may or may not parse, but if it does, the errant attribute
+// should be ignored.
+TEST_P(NewSdpTest, CheckMsidInSessionLevel) {
+ ParseSdp(kMsidInSessionSDP, false);
+ if (mSdp) {
+ ASSERT_FALSE(mSdp->GetMediaSection(0).GetAttributeList().HasAttribute(
+ SdpAttribute::kMsidAttribute));
+ ASSERT_FALSE(mSdp->GetAttributeList().HasAttribute(
+ SdpAttribute::kMsidAttribute));
+ }
+}
+
+const std::string kPtimeInSessionSDP =
+"v=0" CRLF
+"o=Mozilla-SIPUA-35.0a1 5184 0 IN IP4 0.0.0.0" CRLF
+"s=SIP Call" CRLF
+"c=IN IP4 224.0.0.1/100/12" CRLF
+"t=0 0" CRLF
+"a=ptime:50" CRLF
+"m=video 9 RTP/SAVPF 120" CRLF
+"c=IN IP4 0.0.0.0" CRLF
+"a=rtpmap:120 VP8/90000" CRLF;
+
+// This may or may not parse, but if it does, the errant attribute
+// should be ignored.
+TEST_P(NewSdpTest, CheckPtimeInSessionLevel) {
+ ParseSdp(kPtimeInSessionSDP, false);
+ if (mSdp) {
+ ASSERT_FALSE(mSdp->GetMediaSection(0).GetAttributeList().HasAttribute(
+ SdpAttribute::kPtimeAttribute));
+ ASSERT_FALSE(mSdp->GetAttributeList().HasAttribute(
+ SdpAttribute::kPtimeAttribute));
+ }
+}
+
+const std::string kRemoteCandidatesInSessionSDP =
+"v=0" CRLF
+"o=Mozilla-SIPUA-35.0a1 5184 0 IN IP4 0.0.0.0" CRLF
+"s=SIP Call" CRLF
+"c=IN IP4 224.0.0.1/100/12" CRLF
+"t=0 0" CRLF
+"a=remote-candidates:0 10.0.0.1 5555" CRLF
+"m=video 9 RTP/SAVPF 120" CRLF
+"c=IN IP4 0.0.0.0" CRLF
+"a=rtpmap:120 VP8/90000" CRLF;
+
+// This may or may not parse, but if it does, the errant attribute
+// should be ignored.
+TEST_P(NewSdpTest, CheckRemoteCandidatesInSessionLevel) {
+ ParseSdp(kRemoteCandidatesInSessionSDP, false);
+ if (mSdp) {
+ ASSERT_FALSE(mSdp->GetMediaSection(0).GetAttributeList().HasAttribute(
+ SdpAttribute::kRemoteCandidatesAttribute));
+ ASSERT_FALSE(mSdp->GetAttributeList().HasAttribute(
+ SdpAttribute::kRemoteCandidatesAttribute));
+ }
+}
+
+const std::string kRtcpInSessionSDP =
+"v=0" CRLF
+"o=Mozilla-SIPUA-35.0a1 5184 0 IN IP4 0.0.0.0" CRLF
+"s=SIP Call" CRLF
+"c=IN IP4 224.0.0.1/100/12" CRLF
+"t=0 0" CRLF
+"a=rtcp:5555" CRLF
+"m=video 9 RTP/SAVPF 120" CRLF
+"c=IN IP4 0.0.0.0" CRLF
+"a=rtpmap:120 VP8/90000" CRLF;
+
+// This may or may not parse, but if it does, the errant attribute
+// should be ignored.
+TEST_P(NewSdpTest, CheckRtcpInSessionLevel) {
+ ParseSdp(kRtcpInSessionSDP, false);
+ if (mSdp) {
+ ASSERT_FALSE(mSdp->GetMediaSection(0).GetAttributeList().HasAttribute(
+ SdpAttribute::kRtcpAttribute));
+ ASSERT_FALSE(mSdp->GetAttributeList().HasAttribute(
+ SdpAttribute::kRtcpAttribute));
+ }
+}
+
+const std::string kRtcpFbInSessionSDP =
+"v=0" CRLF
+"o=Mozilla-SIPUA-35.0a1 5184 0 IN IP4 0.0.0.0" CRLF
+"s=SIP Call" CRLF
+"c=IN IP4 224.0.0.1/100/12" CRLF
+"t=0 0" CRLF
+"a=rtcp-fb:120 nack" CRLF
+"m=video 9 RTP/SAVPF 120" CRLF
+"c=IN IP4 0.0.0.0" CRLF
+"a=rtpmap:120 VP8/90000" CRLF;
+
+// This may or may not parse, but if it does, the errant attribute
+// should be ignored.
+TEST_P(NewSdpTest, CheckRtcpFbInSessionLevel) {
+ ParseSdp(kRtcpFbInSessionSDP, false);
+ if (mSdp) {
+ ASSERT_FALSE(mSdp->GetMediaSection(0).GetAttributeList().HasAttribute(
+ SdpAttribute::kRtcpFbAttribute));
+ ASSERT_FALSE(mSdp->GetAttributeList().HasAttribute(
+ SdpAttribute::kRtcpFbAttribute));
+ }
+}
+
+const std::string kRtcpMuxInSessionSDP =
+"v=0" CRLF
+"o=Mozilla-SIPUA-35.0a1 5184 0 IN IP4 0.0.0.0" CRLF
+"s=SIP Call" CRLF
+"c=IN IP4 224.0.0.1/100/12" CRLF
+"t=0 0" CRLF
+"a=rtcp-mux" CRLF
+"m=video 9 RTP/SAVPF 120" CRLF
+"c=IN IP4 0.0.0.0" CRLF
+"a=rtpmap:120 VP8/90000" CRLF;
+
+// This may or may not parse, but if it does, the errant attribute
+// should be ignored.
+TEST_P(NewSdpTest, CheckRtcpMuxInSessionLevel) {
+ ParseSdp(kRtcpMuxInSessionSDP, false);
+ if (mSdp) {
+ ASSERT_FALSE(mSdp->GetMediaSection(0).GetAttributeList().HasAttribute(
+ SdpAttribute::kRtcpMuxAttribute));
+ ASSERT_FALSE(mSdp->GetAttributeList().HasAttribute(
+ SdpAttribute::kRtcpMuxAttribute));
+ }
+}
+
+const std::string kRtcpRsizeInSessionSDP =
+"v=0" CRLF
+"o=Mozilla-SIPUA-35.0a1 5184 0 IN IP4 0.0.0.0" CRLF
+"s=SIP Call" CRLF
+"c=IN IP4 224.0.0.1/100/12" CRLF
+"t=0 0" CRLF
+"a=rtcp-rsize" CRLF
+"m=video 9 RTP/SAVPF 120" CRLF
+"c=IN IP4 0.0.0.0" CRLF
+"a=rtpmap:120 VP8/90000" CRLF;
+
+// This may or may not parse, but if it does, the errant attribute
+// should be ignored.
+TEST_P(NewSdpTest, CheckRtcpRsizeInSessionLevel) {
+ ParseSdp(kRtcpRsizeInSessionSDP, false);
+ if (mSdp) {
+ ASSERT_FALSE(mSdp->GetMediaSection(0).GetAttributeList().HasAttribute(
+ SdpAttribute::kRtcpRsizeAttribute));
+ ASSERT_FALSE(mSdp->GetAttributeList().HasAttribute(
+ SdpAttribute::kRtcpRsizeAttribute));
+ }
+}
+
+const std::string kRtpmapInSessionSDP =
+"v=0" CRLF
+"o=Mozilla-SIPUA-35.0a1 5184 0 IN IP4 0.0.0.0" CRLF
+"s=SIP Call" CRLF
+"c=IN IP4 224.0.0.1/100/12" CRLF
+"t=0 0" CRLF
+"a=rtpmap:120 VP8/90000" CRLF
+"m=video 9 RTP/SAVPF 120" CRLF
+"c=IN IP4 0.0.0.0" CRLF;
+
+// This may or may not parse, but if it does, the errant attribute
+// should be ignored.
+TEST_P(NewSdpTest, CheckRtpmapInSessionLevel) {
+ ParseSdp(kRtpmapInSessionSDP, false);
+ if (mSdp) {
+ ASSERT_FALSE(mSdp->GetMediaSection(0).GetAttributeList().HasAttribute(
+ SdpAttribute::kRtpmapAttribute));
+ ASSERT_FALSE(mSdp->GetAttributeList().HasAttribute(
+ SdpAttribute::kRtpmapAttribute));
+ }
+}
+
+const std::string kSctpmapInSessionSDP =
+"v=0" CRLF
+"o=Mozilla-SIPUA-35.0a1 5184 0 IN IP4 0.0.0.0" CRLF
+"s=SIP Call" CRLF
+"c=IN IP4 224.0.0.1/100/12" CRLF
+"t=0 0" CRLF
+"a=sctpmap:5000" CRLF
+"m=video 9 RTP/SAVPF 120" CRLF
+"c=IN IP4 0.0.0.0" CRLF
+"a=rtpmap:120 VP8/90000" CRLF;
+
+// This may or may not parse, but if it does, the errant attribute
+// should be ignored.
+TEST_P(NewSdpTest, CheckSctpmapInSessionLevel) {
+ ParseSdp(kSctpmapInSessionSDP, false);
+ if (mSdp) {
+ ASSERT_FALSE(mSdp->GetMediaSection(0).GetAttributeList().HasAttribute(
+ SdpAttribute::kSctpmapAttribute));
+ ASSERT_FALSE(mSdp->GetAttributeList().HasAttribute(
+ SdpAttribute::kSctpmapAttribute));
+ }
+}
+
+const std::string kSsrcInSessionSDP =
+"v=0" CRLF
+"o=Mozilla-SIPUA-35.0a1 5184 0 IN IP4 0.0.0.0" CRLF
+"s=SIP Call" CRLF
+"c=IN IP4 224.0.0.1/100/12" CRLF
+"t=0 0" CRLF
+"a=ssrc:5000" CRLF
+"m=video 9 RTP/SAVPF 120" CRLF
+"c=IN IP4 0.0.0.0" CRLF
+"a=rtpmap:120 VP8/90000" CRLF;
+
+// This may or may not parse, but if it does, the errant attribute
+// should be ignored.
+TEST_P(NewSdpTest, CheckSsrcInSessionLevel) {
+ ParseSdp(kSsrcInSessionSDP, false);
+ if (mSdp) {
+ ASSERT_FALSE(mSdp->GetMediaSection(0).GetAttributeList().HasAttribute(
+ SdpAttribute::kSsrcAttribute));
+ ASSERT_FALSE(mSdp->GetAttributeList().HasAttribute(
+ SdpAttribute::kSsrcAttribute));
+ }
+}
+
+const std::string kSsrcGroupInSessionSDP =
+"v=0" CRLF
+"o=Mozilla-SIPUA-35.0a1 5184 0 IN IP4 0.0.0.0" CRLF
+"s=SIP Call" CRLF
+"c=IN IP4 224.0.0.1/100/12" CRLF
+"t=0 0" CRLF
+"a=ssrc-group:FID 5000" CRLF
+"m=video 9 RTP/SAVPF 120" CRLF
+"c=IN IP4 0.0.0.0" CRLF
+"a=rtpmap:120 VP8/90000" CRLF;
+
+// This may or may not parse, but if it does, the errant attribute
+// should be ignored.
+TEST_P(NewSdpTest, CheckSsrcGroupInSessionLevel) {
+ ParseSdp(kSsrcGroupInSessionSDP, false);
+ if (mSdp) {
+ ASSERT_FALSE(mSdp->GetMediaSection(0).GetAttributeList().HasAttribute(
+ SdpAttribute::kSsrcGroupAttribute));
+ ASSERT_FALSE(mSdp->GetAttributeList().HasAttribute(
+ SdpAttribute::kSsrcGroupAttribute));
+ }
+}
+
+const std::string kMalformedImageattr =
+"v=0" CRLF
+"o=Mozilla-SIPUA-35.0a1 5184 0 IN IP4 0.0.0.0" CRLF
+"s=SIP Call" CRLF
+"c=IN IP4 224.0.0.1/100/12" CRLF
+"t=0 0" CRLF
+"m=video 9 RTP/SAVPF 120" CRLF
+"c=IN IP4 0.0.0.0" CRLF
+"a=rtpmap:120 VP8/90000" CRLF
+"a=imageattr:flob" CRLF;
+
+TEST_P(NewSdpTest, CheckMalformedImageattr)
+{
+ if (GetParam()) {
+ // Don't do a parse/serialize before running this test
+ return;
+ }
+
+ ParseSdp(kMalformedImageattr, false);
+ ASSERT_NE("", GetParseErrors());
+}
+
+TEST_P(NewSdpTest, ParseInvalidSimulcastNoSuchSendRid) {
+ ParseSdp("v=0" CRLF
+ "o=- 4294967296 2 IN IP4 127.0.0.1" CRLF
+ "s=SIP Call" CRLF
+ "c=IN IP4 198.51.100.7" CRLF
+ "b=CT:5000" CRLF
+ "t=0 0" CRLF
+ "m=video 56436 RTP/SAVPF 120" CRLF
+ "a=rtpmap:120 VP8/90000" CRLF
+ "a=sendrecv" CRLF
+ "a=simulcast: send rid=9" CRLF,
+ false);
+ ASSERT_NE("", GetParseErrors());
+}
+
+TEST_P(NewSdpTest, ParseInvalidSimulcastNoSuchRecvRid) {
+ ParseSdp("v=0" CRLF
+ "o=- 4294967296 2 IN IP4 127.0.0.1" CRLF
+ "s=SIP Call" CRLF
+ "c=IN IP4 198.51.100.7" CRLF
+ "b=CT:5000" CRLF
+ "t=0 0" CRLF
+ "m=video 56436 RTP/SAVPF 120" CRLF
+ "a=rtpmap:120 VP8/90000" CRLF
+ "a=sendrecv" CRLF
+ "a=simulcast: recv rid=9" CRLF,
+ false);
+ ASSERT_NE("", GetParseErrors());
+}
+
+TEST_P(NewSdpTest, ParseInvalidSimulcastNoSuchPt) {
+ ParseSdp("v=0" CRLF
+ "o=- 4294967296 2 IN IP4 127.0.0.1" CRLF
+ "s=SIP Call" CRLF
+ "c=IN IP4 198.51.100.7" CRLF
+ "b=CT:5000" CRLF
+ "t=0 0" CRLF
+ "m=video 56436 RTP/SAVPF 120" CRLF
+ "a=rtpmap:120 VP8/90000" CRLF
+ "a=sendrecv" CRLF
+ "a=simulcast: send pt=9" CRLF,
+ false);
+ ASSERT_NE("", GetParseErrors());
+}
+
+TEST_P(NewSdpTest, ParseInvalidSimulcastNotSending) {
+ ParseSdp("v=0" CRLF
+ "o=- 4294967296 2 IN IP4 127.0.0.1" CRLF
+ "s=SIP Call" CRLF
+ "c=IN IP4 198.51.100.7" CRLF
+ "b=CT:5000" CRLF
+ "t=0 0" CRLF
+ "m=video 56436 RTP/SAVPF 120" CRLF
+ "a=rtpmap:120 VP8/90000" CRLF
+ "a=recvonly" CRLF
+ "a=simulcast: send pt=120" CRLF,
+ false);
+ ASSERT_NE("", GetParseErrors());
+}
+
+TEST_P(NewSdpTest, ParseInvalidSimulcastNotReceiving) {
+ ParseSdp("v=0" CRLF
+ "o=- 4294967296 2 IN IP4 127.0.0.1" CRLF
+ "s=SIP Call" CRLF
+ "c=IN IP4 198.51.100.7" CRLF
+ "b=CT:5000" CRLF
+ "t=0 0" CRLF
+ "m=video 56436 RTP/SAVPF 120" CRLF
+ "a=rtpmap:120 VP8/90000" CRLF
+ "a=sendonly" CRLF
+ "a=simulcast: recv pt=120" CRLF,
+ false);
+ ASSERT_NE("", GetParseErrors());
+}
+
+const std::string kNoAttributes =
+"v=0" CRLF
+"o=Mozilla-SIPUA-35.0a1 5184 0 IN IP4 0.0.0.0" CRLF
+"s=SIP Call" CRLF
+"c=IN IP4 224.0.0.1/100/12" CRLF
+"t=0 0" CRLF
+"m=video 9 RTP/SAVPF 120" CRLF
+"c=IN IP4 0.0.0.0" CRLF
+"a=rtpmap:120 VP8/90000" CRLF;
+
+TEST_P(NewSdpTest, CheckNoAttributes) {
+ ParseSdp(kNoAttributes);
+
+ for (auto a = static_cast<size_t>(SdpAttribute::kFirstAttribute);
+ a <= static_cast<size_t>(SdpAttribute::kLastAttribute);
+ ++a) {
+
+ SdpAttribute::AttributeType type =
+ static_cast<SdpAttribute::AttributeType>(a);
+
+ // rtpmap is a special case right now, we throw parse errors if it is
+ // missing, and then insert one.
+ // direction is another special case that gets a default if not present
+ if (type != SdpAttribute::kRtpmapAttribute &&
+ type != SdpAttribute::kDirectionAttribute) {
+ ASSERT_FALSE(
+ mSdp->GetMediaSection(0).GetAttributeList().HasAttribute(type))
+ << "Attribute " << a << " should not have been present at media level";
+ ASSERT_FALSE(mSdp->GetAttributeList().HasAttribute(type))
+ << "Attribute " << a << " should not have been present at session level";
+ }
+ }
+
+ ASSERT_FALSE(mSdp->GetAttributeList().HasAttribute(
+ SdpAttribute::kRtpmapAttribute));
+
+ ASSERT_TRUE(mSdp->GetMediaSection(0).GetAttributeList().HasAttribute(
+ SdpAttribute::kDirectionAttribute));
+ ASSERT_EQ(SdpDirectionAttribute::kSendrecv,
+ mSdp->GetMediaSection(0).GetAttributeList().GetDirection());
+ ASSERT_TRUE(mSdp->GetAttributeList().HasAttribute(
+ SdpAttribute::kDirectionAttribute));
+ ASSERT_EQ(SdpDirectionAttribute::kSendrecv,
+ mSdp->GetAttributeList().GetDirection());
+}
+
+
+const std::string kMediaLevelDtlsMessage =
+"v=0" CRLF
+"o=Mozilla-SIPUA-35.0a1 5184 0 IN IP4 0.0.0.0" CRLF
+"s=SIP Call" CRLF
+"c=IN IP4 224.0.0.1/100/12" CRLF
+"t=0 0" CRLF
+"m=video 9 RTP/SAVPF 120" CRLF
+"c=IN IP4 0.0.0.0" CRLF
+"a=dtls-message:client " BASE64_DTLS_HELLO CRLF
+"a=rtpmap:120 VP8/90000" CRLF;
+
+TEST_P(NewSdpTest, CheckMediaLevelDtlsMessage) {
+ ParseSdp(kMediaLevelDtlsMessage);
+ ASSERT_TRUE(!!mSdp) << "Parse failed: " << GetParseErrors();
+
+ // dtls-message is not defined for use at the media level; we don't
+ // parse it
+ ASSERT_FALSE(mSdp->GetMediaSection(0).GetAttributeList().HasAttribute(
+ SdpAttribute::kDtlsMessageAttribute));
+}
+
+
+TEST(NewSdpTestNoFixture, CheckAttributeTypeSerialize) {
+ for (auto a = static_cast<size_t>(SdpAttribute::kFirstAttribute);
+ a <= static_cast<size_t>(SdpAttribute::kLastAttribute);
+ ++a) {
+
+ SdpAttribute::AttributeType type =
+ static_cast<SdpAttribute::AttributeType>(a);
+
+ // Direction attributes are handled a little differently
+ if (type != SdpAttribute::kDirectionAttribute) {
+ std::ostringstream os;
+ os << type;
+ ASSERT_NE("", os.str());
+ }
+ }
+}
+
+static SdpImageattrAttributeList::XYRange
+ParseXYRange(const std::string& input)
+{
+ std::istringstream is(input + ",");
+ std::string error;
+ SdpImageattrAttributeList::XYRange range;
+ EXPECT_TRUE(range.Parse(is, &error)) << error;
+ EXPECT_EQ(',', is.get());
+ EXPECT_EQ(EOF, is.get());
+ return range;
+}
+
+TEST(NewSdpTestNoFixture, CheckImageattrXYRangeParseValid)
+{
+ {
+ SdpImageattrAttributeList::XYRange range(ParseXYRange("640"));
+ ASSERT_EQ(1U, range.discreteValues.size());
+ ASSERT_EQ(640U, range.discreteValues[0]);
+ }
+
+ {
+ SdpImageattrAttributeList::XYRange range(ParseXYRange("[320,640]"));
+ ASSERT_EQ(2U, range.discreteValues.size());
+ ASSERT_EQ(320U, range.discreteValues[0]);
+ ASSERT_EQ(640U, range.discreteValues[1]);
+ }
+
+ {
+ SdpImageattrAttributeList::XYRange range(ParseXYRange("[320,640,1024]"));
+ ASSERT_EQ(3U, range.discreteValues.size());
+ ASSERT_EQ(320U, range.discreteValues[0]);
+ ASSERT_EQ(640U, range.discreteValues[1]);
+ ASSERT_EQ(1024U, range.discreteValues[2]);
+ }
+
+ {
+ SdpImageattrAttributeList::XYRange range(ParseXYRange("[320:640]"));
+ ASSERT_EQ(0U, range.discreteValues.size());
+ ASSERT_EQ(320U, range.min);
+ ASSERT_EQ(1U, range.step);
+ ASSERT_EQ(640U, range.max);
+ }
+
+ {
+ SdpImageattrAttributeList::XYRange range(ParseXYRange("[320:16:640]"));
+ ASSERT_EQ(0U, range.discreteValues.size());
+ ASSERT_EQ(320U, range.min);
+ ASSERT_EQ(16U, range.step);
+ ASSERT_EQ(640U, range.max);
+ }
+}
+
+template<typename T>
+void
+ParseInvalid(const std::string& input, size_t last)
+{
+ std::istringstream is(input);
+ T parsed;
+ std::string error;
+ ASSERT_FALSE(parsed.Parse(is, &error))
+ << "\'" << input << "\' should not have parsed successfully";
+ is.clear();
+ ASSERT_EQ(last, static_cast<size_t>(is.tellg()))
+ << "Parse failed at unexpected location:" << std::endl
+ << input << std::endl
+ << std::string(is.tellg(), ' ') << "^" << std::endl;
+ // For a human to eyeball to make sure the error strings look sane
+ std::cout << "\"" << input << "\" - " << error << std::endl; \
+}
+
+TEST(NewSdpTestNoFixture, CheckImageattrXYRangeParseInvalid)
+{
+ ParseInvalid<SdpImageattrAttributeList::XYRange>("[-1", 1);
+ ParseInvalid<SdpImageattrAttributeList::XYRange>("[-", 1);
+ ParseInvalid<SdpImageattrAttributeList::XYRange>("[-v", 1);
+ ParseInvalid<SdpImageattrAttributeList::XYRange>("[640:-1", 5);
+ ParseInvalid<SdpImageattrAttributeList::XYRange>("[640:16:-1", 8);
+ ParseInvalid<SdpImageattrAttributeList::XYRange>("[640,-1", 5);
+ ParseInvalid<SdpImageattrAttributeList::XYRange>("[640,-]", 5);
+ ParseInvalid<SdpImageattrAttributeList::XYRange>("-v", 0);
+ ParseInvalid<SdpImageattrAttributeList::XYRange>("-1", 0);
+ ParseInvalid<SdpImageattrAttributeList::XYRange>("", 0);
+ ParseInvalid<SdpImageattrAttributeList::XYRange>("[", 1);
+ ParseInvalid<SdpImageattrAttributeList::XYRange>("[v", 1);
+ ParseInvalid<SdpImageattrAttributeList::XYRange>("[", 1);
+ ParseInvalid<SdpImageattrAttributeList::XYRange>("[ 640", 1);
+ // It looks like the overflow detection only happens once the whole number
+ // is scanned...
+ ParseInvalid<SdpImageattrAttributeList::XYRange>("[99999999999999999:", 18);
+ ParseInvalid<SdpImageattrAttributeList::XYRange>("[640", 4);
+ ParseInvalid<SdpImageattrAttributeList::XYRange>("[640:", 5);
+ ParseInvalid<SdpImageattrAttributeList::XYRange>("[640:v", 5);
+ ParseInvalid<SdpImageattrAttributeList::XYRange>("[640:16", 7);
+ ParseInvalid<SdpImageattrAttributeList::XYRange>("[640:16:", 8);
+ ParseInvalid<SdpImageattrAttributeList::XYRange>("[640:16:v", 8);
+ ParseInvalid<SdpImageattrAttributeList::XYRange>("[640:16:320]", 11);
+ ParseInvalid<SdpImageattrAttributeList::XYRange>("[640:16:320", 11);
+ ParseInvalid<SdpImageattrAttributeList::XYRange>("[640:16:320v", 11);
+ ParseInvalid<SdpImageattrAttributeList::XYRange>("[640:1024", 9);
+ ParseInvalid<SdpImageattrAttributeList::XYRange>("[640:320]", 8);
+ ParseInvalid<SdpImageattrAttributeList::XYRange>("[640:1024v", 9);
+ ParseInvalid<SdpImageattrAttributeList::XYRange>("[640,", 5);
+ ParseInvalid<SdpImageattrAttributeList::XYRange>("[640,v", 5);
+ ParseInvalid<SdpImageattrAttributeList::XYRange>("[640]", 4);
+ ParseInvalid<SdpImageattrAttributeList::XYRange>("[640x", 4);
+ ParseInvalid<SdpImageattrAttributeList::XYRange>("[640,]", 5);
+ ParseInvalid<SdpImageattrAttributeList::XYRange>(" ", 0);
+ ParseInvalid<SdpImageattrAttributeList::XYRange>("v", 0);
+}
+
+static SdpImageattrAttributeList::SRange
+ParseSRange(const std::string& input)
+{
+ std::istringstream is(input + ",");
+ std::string error;
+ SdpImageattrAttributeList::SRange range;
+ EXPECT_TRUE(range.Parse(is, &error)) << error;
+ EXPECT_EQ(',', is.get());
+ EXPECT_EQ(EOF, is.get());
+ return range;
+}
+
+TEST(NewSdpTestNoFixture, CheckImageattrSRangeParseValid)
+{
+ {
+ SdpImageattrAttributeList::SRange range(ParseSRange("0.1"));
+ ASSERT_EQ(1U, range.discreteValues.size());
+ ASSERT_FLOAT_EQ(0.1f, range.discreteValues[0]);
+ }
+
+ {
+ SdpImageattrAttributeList::SRange range(ParseSRange("[0.1,0.2]"));
+ ASSERT_EQ(2U, range.discreteValues.size());
+ ASSERT_FLOAT_EQ(0.1f, range.discreteValues[0]);
+ ASSERT_FLOAT_EQ(0.2f, range.discreteValues[1]);
+ }
+
+ {
+ SdpImageattrAttributeList::SRange range(ParseSRange("[0.1,0.2,0.3]"));
+ ASSERT_EQ(3U, range.discreteValues.size());
+ ASSERT_FLOAT_EQ(0.1f, range.discreteValues[0]);
+ ASSERT_FLOAT_EQ(0.2f, range.discreteValues[1]);
+ ASSERT_FLOAT_EQ(0.3f, range.discreteValues[2]);
+ }
+
+ {
+ SdpImageattrAttributeList::SRange range(ParseSRange("[0.1-0.2]"));
+ ASSERT_EQ(0U, range.discreteValues.size());
+ ASSERT_FLOAT_EQ(0.1f, range.min);
+ ASSERT_FLOAT_EQ(0.2f, range.max);
+ }
+}
+
+TEST(NewSdpTestNoFixture, CheckImageattrSRangeParseInvalid)
+{
+ ParseInvalid<SdpImageattrAttributeList::SRange>("", 0);
+ ParseInvalid<SdpImageattrAttributeList::SRange>("[", 1);
+ ParseInvalid<SdpImageattrAttributeList::SRange>("[v", 1);
+ ParseInvalid<SdpImageattrAttributeList::SRange>("[-1", 1);
+ ParseInvalid<SdpImageattrAttributeList::SRange>("[", 1);
+ ParseInvalid<SdpImageattrAttributeList::SRange>("[-", 1);
+ ParseInvalid<SdpImageattrAttributeList::SRange>("[v", 1);
+ ParseInvalid<SdpImageattrAttributeList::SRange>("[ 0.2", 1);
+ ParseInvalid<SdpImageattrAttributeList::SRange>("[10.1-", 5);
+ ParseInvalid<SdpImageattrAttributeList::SRange>("[0.08-", 5);
+ ParseInvalid<SdpImageattrAttributeList::SRange>("[0.2", 4);
+ ParseInvalid<SdpImageattrAttributeList::SRange>("[0.2-", 5);
+ ParseInvalid<SdpImageattrAttributeList::SRange>("[0.2-v", 5);
+ ParseInvalid<SdpImageattrAttributeList::SRange>("[0.2--1", 5);
+ ParseInvalid<SdpImageattrAttributeList::SRange>("[0.2-0.3", 8);
+ ParseInvalid<SdpImageattrAttributeList::SRange>("[0.2-0.1]", 8);
+ ParseInvalid<SdpImageattrAttributeList::SRange>("[0.2-0.3v", 8);
+ ParseInvalid<SdpImageattrAttributeList::SRange>("[0.2,", 5);
+ ParseInvalid<SdpImageattrAttributeList::SRange>("[0.2,v", 5);
+ ParseInvalid<SdpImageattrAttributeList::SRange>("[0.2,-1", 5);
+ ParseInvalid<SdpImageattrAttributeList::SRange>("[0.2]", 4);
+ ParseInvalid<SdpImageattrAttributeList::SRange>("[0.2v", 4);
+ ParseInvalid<SdpImageattrAttributeList::SRange>("[0.2,]", 5);
+ ParseInvalid<SdpImageattrAttributeList::SRange>("[0.2,-]", 5);
+ ParseInvalid<SdpImageattrAttributeList::SRange>(" ", 0);
+ ParseInvalid<SdpImageattrAttributeList::SRange>("v", 0);
+ ParseInvalid<SdpImageattrAttributeList::SRange>("-v", 0);
+ ParseInvalid<SdpImageattrAttributeList::SRange>("-1", 0);
+}
+
+static SdpImageattrAttributeList::PRange
+ParsePRange(const std::string& input)
+{
+ std::istringstream is(input + ",");
+ std::string error;
+ SdpImageattrAttributeList::PRange range;
+ EXPECT_TRUE(range.Parse(is, &error)) << error;
+ EXPECT_EQ(',', is.get());
+ EXPECT_EQ(EOF, is.get());
+ return range;
+}
+
+TEST(NewSdpTestNoFixture, CheckImageattrPRangeParseValid)
+{
+ SdpImageattrAttributeList::PRange range(ParsePRange("[0.1000-9.9999]"));
+ ASSERT_FLOAT_EQ(0.1f, range.min);
+ ASSERT_FLOAT_EQ(9.9999f, range.max);
+}
+
+TEST(NewSdpTestNoFixture, CheckImageattrPRangeParseInvalid)
+{
+ ParseInvalid<SdpImageattrAttributeList::PRange>("", 0);
+ ParseInvalid<SdpImageattrAttributeList::PRange>("[", 1);
+ ParseInvalid<SdpImageattrAttributeList::PRange>("[v", 1);
+ ParseInvalid<SdpImageattrAttributeList::PRange>("[-1", 1);
+ ParseInvalid<SdpImageattrAttributeList::PRange>("[", 1);
+ ParseInvalid<SdpImageattrAttributeList::PRange>("[-", 1);
+ ParseInvalid<SdpImageattrAttributeList::PRange>("[v", 1);
+ ParseInvalid<SdpImageattrAttributeList::PRange>("[ 0.2", 1);
+ ParseInvalid<SdpImageattrAttributeList::PRange>("[10.1-", 5);
+ ParseInvalid<SdpImageattrAttributeList::PRange>("[0.08-", 5);
+ ParseInvalid<SdpImageattrAttributeList::PRange>("[0.2", 4);
+ ParseInvalid<SdpImageattrAttributeList::PRange>("[0.2-", 5);
+ ParseInvalid<SdpImageattrAttributeList::PRange>("[0.2-v", 5);
+ ParseInvalid<SdpImageattrAttributeList::PRange>("[0.2--1", 5);
+ ParseInvalid<SdpImageattrAttributeList::PRange>("[0.2-0.3", 8);
+ ParseInvalid<SdpImageattrAttributeList::PRange>("[0.2-0.1]", 8);
+ ParseInvalid<SdpImageattrAttributeList::PRange>("[0.2-0.3v", 8);
+ ParseInvalid<SdpImageattrAttributeList::PRange>("[0.2,", 4);
+ ParseInvalid<SdpImageattrAttributeList::PRange>("[0.2:", 4);
+ ParseInvalid<SdpImageattrAttributeList::PRange>("[0.2]", 4);
+ ParseInvalid<SdpImageattrAttributeList::PRange>("[0.2v", 4);
+ ParseInvalid<SdpImageattrAttributeList::PRange>(" ", 0);
+ ParseInvalid<SdpImageattrAttributeList::PRange>("v", 0);
+ ParseInvalid<SdpImageattrAttributeList::PRange>("-x", 0);
+ ParseInvalid<SdpImageattrAttributeList::PRange>("-1", 0);
+}
+
+static SdpImageattrAttributeList::Set
+ParseSet(const std::string& input)
+{
+ std::istringstream is(input + " ");
+ std::string error;
+ SdpImageattrAttributeList::Set set;
+ EXPECT_TRUE(set.Parse(is, &error)) << error;
+ EXPECT_EQ(' ', is.get());
+ EXPECT_EQ(EOF, is.get());
+ return set;
+}
+
+TEST(NewSdpTestNoFixture, CheckImageattrSetParseValid)
+{
+ {
+ SdpImageattrAttributeList::Set set(ParseSet("[x=320,y=240]"));
+ ASSERT_EQ(1U, set.xRange.discreteValues.size());
+ ASSERT_EQ(320U, set.xRange.discreteValues[0]);
+ ASSERT_EQ(1U, set.yRange.discreteValues.size());
+ ASSERT_EQ(240U, set.yRange.discreteValues[0]);
+ ASSERT_FALSE(set.sRange.IsSet());
+ ASSERT_FALSE(set.pRange.IsSet());
+ ASSERT_FLOAT_EQ(0.5f, set.qValue);
+ }
+
+ {
+ SdpImageattrAttributeList::Set set(ParseSet("[X=320,Y=240]"));
+ ASSERT_EQ(1U, set.xRange.discreteValues.size());
+ ASSERT_EQ(320U, set.xRange.discreteValues[0]);
+ ASSERT_EQ(1U, set.yRange.discreteValues.size());
+ ASSERT_EQ(240U, set.yRange.discreteValues[0]);
+ ASSERT_FALSE(set.sRange.IsSet());
+ ASSERT_FALSE(set.pRange.IsSet());
+ ASSERT_FLOAT_EQ(0.5f, set.qValue);
+ }
+
+ {
+ SdpImageattrAttributeList::Set set(ParseSet("[x=320,y=240,par=[0.1-0.2]]"));
+ ASSERT_EQ(1U, set.xRange.discreteValues.size());
+ ASSERT_EQ(320U, set.xRange.discreteValues[0]);
+ ASSERT_EQ(1U, set.yRange.discreteValues.size());
+ ASSERT_EQ(240U, set.yRange.discreteValues[0]);
+ ASSERT_FALSE(set.sRange.IsSet());
+ ASSERT_TRUE(set.pRange.IsSet());
+ ASSERT_FLOAT_EQ(0.1f, set.pRange.min);
+ ASSERT_FLOAT_EQ(0.2f, set.pRange.max);
+ ASSERT_FLOAT_EQ(0.5f, set.qValue);
+ }
+
+ {
+ SdpImageattrAttributeList::Set set(ParseSet("[x=320,y=240,sar=[0.1-0.2]]"));
+ ASSERT_EQ(1U, set.xRange.discreteValues.size());
+ ASSERT_EQ(320U, set.xRange.discreteValues[0]);
+ ASSERT_EQ(1U, set.yRange.discreteValues.size());
+ ASSERT_EQ(240U, set.yRange.discreteValues[0]);
+ ASSERT_TRUE(set.sRange.IsSet());
+ ASSERT_FLOAT_EQ(0.1f, set.sRange.min);
+ ASSERT_FLOAT_EQ(0.2f, set.sRange.max);
+ ASSERT_FALSE(set.pRange.IsSet());
+ ASSERT_FLOAT_EQ(0.5f, set.qValue);
+ }
+
+ {
+ SdpImageattrAttributeList::Set set(ParseSet("[x=320,y=240,q=0.1]"));
+ ASSERT_EQ(1U, set.xRange.discreteValues.size());
+ ASSERT_EQ(320U, set.xRange.discreteValues[0]);
+ ASSERT_EQ(1U, set.yRange.discreteValues.size());
+ ASSERT_EQ(240U, set.yRange.discreteValues[0]);
+ ASSERT_FALSE(set.sRange.IsSet());
+ ASSERT_FALSE(set.pRange.IsSet());
+ ASSERT_FLOAT_EQ(0.1f, set.qValue);
+ }
+
+ {
+ SdpImageattrAttributeList::Set set(
+ ParseSet("[x=320,y=240,par=[0.1-0.2],sar=[0.3-0.4],q=0.6]"));
+ ASSERT_EQ(1U, set.xRange.discreteValues.size());
+ ASSERT_EQ(320U, set.xRange.discreteValues[0]);
+ ASSERT_EQ(1U, set.yRange.discreteValues.size());
+ ASSERT_EQ(240U, set.yRange.discreteValues[0]);
+ ASSERT_TRUE(set.sRange.IsSet());
+ ASSERT_FLOAT_EQ(0.3f, set.sRange.min);
+ ASSERT_FLOAT_EQ(0.4f, set.sRange.max);
+ ASSERT_TRUE(set.pRange.IsSet());
+ ASSERT_FLOAT_EQ(0.1f, set.pRange.min);
+ ASSERT_FLOAT_EQ(0.2f, set.pRange.max);
+ ASSERT_FLOAT_EQ(0.6f, set.qValue);
+ }
+
+ {
+ SdpImageattrAttributeList::Set set(ParseSet("[x=320,y=240,foo=bar,q=0.1]"));
+ ASSERT_EQ(1U, set.xRange.discreteValues.size());
+ ASSERT_EQ(320U, set.xRange.discreteValues[0]);
+ ASSERT_EQ(1U, set.yRange.discreteValues.size());
+ ASSERT_EQ(240U, set.yRange.discreteValues[0]);
+ ASSERT_FALSE(set.sRange.IsSet());
+ ASSERT_FALSE(set.pRange.IsSet());
+ ASSERT_FLOAT_EQ(0.1f, set.qValue);
+ }
+
+ {
+ SdpImageattrAttributeList::Set set(
+ ParseSet("[x=320,y=240,foo=bar,q=0.1,bar=baz]"));
+ ASSERT_EQ(1U, set.xRange.discreteValues.size());
+ ASSERT_EQ(320U, set.xRange.discreteValues[0]);
+ ASSERT_EQ(1U, set.yRange.discreteValues.size());
+ ASSERT_EQ(240U, set.yRange.discreteValues[0]);
+ ASSERT_FALSE(set.sRange.IsSet());
+ ASSERT_FALSE(set.pRange.IsSet());
+ ASSERT_FLOAT_EQ(0.1f, set.qValue);
+ }
+
+ {
+ SdpImageattrAttributeList::Set set(
+ ParseSet("[x=320,y=240,foo=[bar],q=0.1,bar=[baz]]"));
+ ASSERT_EQ(1U, set.xRange.discreteValues.size());
+ ASSERT_EQ(320U, set.xRange.discreteValues[0]);
+ ASSERT_EQ(1U, set.yRange.discreteValues.size());
+ ASSERT_EQ(240U, set.yRange.discreteValues[0]);
+ ASSERT_FALSE(set.sRange.IsSet());
+ ASSERT_FALSE(set.pRange.IsSet());
+ ASSERT_FLOAT_EQ(0.1f, set.qValue);
+ }
+
+ {
+ SdpImageattrAttributeList::Set set(
+ ParseSet("[x=320,y=240,foo=[par=foo,sar=bar],q=0.1,bar=[baz]]"));
+ ASSERT_EQ(1U, set.xRange.discreteValues.size());
+ ASSERT_EQ(320U, set.xRange.discreteValues[0]);
+ ASSERT_EQ(1U, set.yRange.discreteValues.size());
+ ASSERT_EQ(240U, set.yRange.discreteValues[0]);
+ ASSERT_FALSE(set.sRange.IsSet());
+ ASSERT_FALSE(set.pRange.IsSet());
+ ASSERT_FLOAT_EQ(0.1f, set.qValue);
+ }
+}
+
+// Malformed imageattr "set" strings must fail to parse. The second argument
+// to ParseInvalid is the stream position (character offset) where the parser
+// is expected to stop, pinning down *where* each error is detected.
+TEST(NewSdpTestNoFixture, CheckImageattrSetParseInvalid)
+{
+  ParseInvalid<SdpImageattrAttributeList::Set>("", 0);
+  ParseInvalid<SdpImageattrAttributeList::Set>("x", 0);
+  ParseInvalid<SdpImageattrAttributeList::Set>("[", 1);
+  ParseInvalid<SdpImageattrAttributeList::Set>("[=", 2);
+  ParseInvalid<SdpImageattrAttributeList::Set>("[x", 2);
+  ParseInvalid<SdpImageattrAttributeList::Set>("[y=", 3);
+  ParseInvalid<SdpImageattrAttributeList::Set>("[x=[", 4);
+  ParseInvalid<SdpImageattrAttributeList::Set>("[x=320", 6);
+  ParseInvalid<SdpImageattrAttributeList::Set>("[x=320v", 6);
+  ParseInvalid<SdpImageattrAttributeList::Set>("[x=320,", 7);
+  ParseInvalid<SdpImageattrAttributeList::Set>("[x=320,=", 8);
+  ParseInvalid<SdpImageattrAttributeList::Set>("[x=320,x", 8);
+  ParseInvalid<SdpImageattrAttributeList::Set>("[x=320,x=", 9);
+  ParseInvalid<SdpImageattrAttributeList::Set>("[x=320,y=[", 10);
+  ParseInvalid<SdpImageattrAttributeList::Set>("[x=320,y=240", 12);
+  ParseInvalid<SdpImageattrAttributeList::Set>("[x=320,y=240x", 12);
+  ParseInvalid<SdpImageattrAttributeList::Set>("[x=320,y=240,", 13);
+  ParseInvalid<SdpImageattrAttributeList::Set>("[x=320,y=240,q=", 15);
+  ParseInvalid<SdpImageattrAttributeList::Set>("[x=320,y=240,q=v", 15);
+  ParseInvalid<SdpImageattrAttributeList::Set>("[x=320,y=240,q=0.5", 18);
+  ParseInvalid<SdpImageattrAttributeList::Set>("[x=320,y=240,q=0.5,", 19);
+  ParseInvalid<SdpImageattrAttributeList::Set>("[x=320,y=240,q=0.5,]", 20);
+  ParseInvalid<SdpImageattrAttributeList::Set>("[x=320,y=240,q=0.5,=]", 20);
+  ParseInvalid<SdpImageattrAttributeList::Set>("[x=320,y=240,q=0.5,sar=v]", 23);
+  // Duplicate q= parameter is rejected.
+  ParseInvalid<SdpImageattrAttributeList::Set>("[x=320,y=240,q=0.5,q=0.4", 21);
+  ParseInvalid<SdpImageattrAttributeList::Set>("[x=320,y=240,sar=", 17);
+  ParseInvalid<SdpImageattrAttributeList::Set>("[x=320,y=240,sar=v", 17);
+  // Duplicate sar= parameter is rejected.
+  ParseInvalid<SdpImageattrAttributeList::Set>(
+      "[x=320,y=240,sar=[0.5-0.6],sar=[0.7-0.8]", 31);
+  ParseInvalid<SdpImageattrAttributeList::Set>("[x=320,y=240,par=", 17);
+  ParseInvalid<SdpImageattrAttributeList::Set>("[x=320,y=240,par=x", 17);
+  // Duplicate par= parameter is rejected.
+  ParseInvalid<SdpImageattrAttributeList::Set>(
+      "[x=320,y=240,par=[0.5-0.6],par=[0.7-0.8]", 31);
+  // Unknown keys need a well-formed value even though they are ignored.
+  ParseInvalid<SdpImageattrAttributeList::Set>("[x=320,y=240,foo=", 17);
+  ParseInvalid<SdpImageattrAttributeList::Set>("[x=320,y=240,foo=x", 18);
+}
+
+// Test helper: parse a full a=imageattr value from |input|, asserting via
+// gtest EXPECTs that the parse succeeds and consumes the entire stream.
+// Returns the parsed Imageattr for further inspection by the caller.
+static SdpImageattrAttributeList::Imageattr
+ParseImageattr(const std::string& input)
+{
+  std::istringstream is(input);
+  std::string error;
+  SdpImageattrAttributeList::Imageattr imageattr;
+  EXPECT_TRUE(imageattr.Parse(is, &error)) << error;
+  EXPECT_TRUE(is.eof());
+  return imageattr;
+}
+
+// Well-formed a=imageattr values parse into the expected structure:
+// payload type (or wildcard '*'), case-insensitive send/recv keywords,
+// wildcard vs. explicit set lists, and multiple sets per direction.
+TEST(NewSdpTestNoFixture, CheckImageattrParseValid)
+{
+  {
+    SdpImageattrAttributeList::Imageattr imageattr(ParseImageattr("* send *"));
+    ASSERT_FALSE(imageattr.pt.isSome());
+    ASSERT_TRUE(imageattr.sendAll);
+    ASSERT_TRUE(imageattr.sendSets.empty());
+    ASSERT_FALSE(imageattr.recvAll);
+    ASSERT_TRUE(imageattr.recvSets.empty());
+  }
+
+  // Direction keywords are case-insensitive.
+  {
+    SdpImageattrAttributeList::Imageattr imageattr(ParseImageattr("* SEND *"));
+    ASSERT_FALSE(imageattr.pt.isSome());
+    ASSERT_TRUE(imageattr.sendAll);
+    ASSERT_TRUE(imageattr.sendSets.empty());
+    ASSERT_FALSE(imageattr.recvAll);
+    ASSERT_TRUE(imageattr.recvSets.empty());
+  }
+
+  {
+    SdpImageattrAttributeList::Imageattr imageattr(ParseImageattr("* recv *"));
+    ASSERT_FALSE(imageattr.pt.isSome());
+    ASSERT_FALSE(imageattr.sendAll);
+    ASSERT_TRUE(imageattr.sendSets.empty());
+    ASSERT_TRUE(imageattr.recvAll);
+    ASSERT_TRUE(imageattr.recvSets.empty());
+  }
+
+  {
+    SdpImageattrAttributeList::Imageattr imageattr(ParseImageattr("* RECV *"));
+    ASSERT_FALSE(imageattr.pt.isSome());
+    ASSERT_FALSE(imageattr.sendAll);
+    ASSERT_TRUE(imageattr.sendSets.empty());
+    ASSERT_TRUE(imageattr.recvAll);
+    ASSERT_TRUE(imageattr.recvSets.empty());
+  }
+
+  // Both directions may appear, in either order.
+  {
+    SdpImageattrAttributeList::Imageattr imageattr(
+        ParseImageattr("* recv * send *"));
+    ASSERT_FALSE(imageattr.pt.isSome());
+    ASSERT_TRUE(imageattr.sendAll);
+    ASSERT_TRUE(imageattr.sendSets.empty());
+    ASSERT_TRUE(imageattr.recvAll);
+    ASSERT_TRUE(imageattr.recvSets.empty());
+  }
+
+  {
+    SdpImageattrAttributeList::Imageattr imageattr(
+        ParseImageattr("* send * recv *"));
+    ASSERT_FALSE(imageattr.pt.isSome());
+    ASSERT_TRUE(imageattr.sendAll);
+    ASSERT_TRUE(imageattr.sendSets.empty());
+    ASSERT_TRUE(imageattr.recvAll);
+    ASSERT_TRUE(imageattr.recvSets.empty());
+  }
+
+  // A numeric payload type instead of the '*' wildcard.
+  {
+    SdpImageattrAttributeList::Imageattr imageattr(
+        ParseImageattr("8 send * recv *"));
+    ASSERT_EQ(8U, *imageattr.pt);
+    ASSERT_TRUE(imageattr.sendAll);
+    ASSERT_TRUE(imageattr.sendSets.empty());
+    ASSERT_TRUE(imageattr.recvAll);
+    ASSERT_TRUE(imageattr.recvSets.empty());
+  }
+
+  // Explicit set on the send side, wildcard on the recv side.
+  {
+    SdpImageattrAttributeList::Imageattr imageattr(
+        ParseImageattr("8 send [x=320,y=240] recv *"));
+    ASSERT_EQ(8U, *imageattr.pt);
+    ASSERT_FALSE(imageattr.sendAll);
+    ASSERT_EQ(1U, imageattr.sendSets.size());
+    ASSERT_EQ(1U, imageattr.sendSets[0].xRange.discreteValues.size());
+    ASSERT_EQ(320U, imageattr.sendSets[0].xRange.discreteValues[0]);
+    ASSERT_EQ(1U, imageattr.sendSets[0].yRange.discreteValues.size());
+    ASSERT_EQ(240U, imageattr.sendSets[0].yRange.discreteValues[0]);
+    ASSERT_TRUE(imageattr.recvAll);
+    ASSERT_TRUE(imageattr.recvSets.empty());
+  }
+
+  // Multiple space-separated sets in one direction.
+  {
+    SdpImageattrAttributeList::Imageattr imageattr(
+        ParseImageattr("8 send [x=320,y=240] [x=640,y=480] recv *"));
+    ASSERT_EQ(8U, *imageattr.pt);
+    ASSERT_FALSE(imageattr.sendAll);
+    ASSERT_EQ(2U, imageattr.sendSets.size());
+    ASSERT_EQ(1U, imageattr.sendSets[0].xRange.discreteValues.size());
+    ASSERT_EQ(320U, imageattr.sendSets[0].xRange.discreteValues[0]);
+    ASSERT_EQ(1U, imageattr.sendSets[0].yRange.discreteValues.size());
+    ASSERT_EQ(240U, imageattr.sendSets[0].yRange.discreteValues[0]);
+    ASSERT_EQ(1U, imageattr.sendSets[1].xRange.discreteValues.size());
+    ASSERT_EQ(640U, imageattr.sendSets[1].xRange.discreteValues[0]);
+    ASSERT_EQ(1U, imageattr.sendSets[1].yRange.discreteValues.size());
+    ASSERT_EQ(480U, imageattr.sendSets[1].yRange.discreteValues[0]);
+    ASSERT_TRUE(imageattr.recvAll);
+    ASSERT_TRUE(imageattr.recvSets.empty());
+  }
+
+  // Mirror cases: explicit set(s) on the recv side, wildcard send.
+  {
+    SdpImageattrAttributeList::Imageattr imageattr(
+        ParseImageattr("8 send * recv [x=320,y=240]"));
+    ASSERT_EQ(8U, *imageattr.pt);
+    ASSERT_FALSE(imageattr.recvAll);
+    ASSERT_EQ(1U, imageattr.recvSets.size());
+    ASSERT_EQ(1U, imageattr.recvSets[0].xRange.discreteValues.size());
+    ASSERT_EQ(320U, imageattr.recvSets[0].xRange.discreteValues[0]);
+    ASSERT_EQ(1U, imageattr.recvSets[0].yRange.discreteValues.size());
+    ASSERT_EQ(240U, imageattr.recvSets[0].yRange.discreteValues[0]);
+    ASSERT_TRUE(imageattr.sendAll);
+    ASSERT_TRUE(imageattr.sendSets.empty());
+  }
+
+  {
+    SdpImageattrAttributeList::Imageattr imageattr(
+        ParseImageattr("8 send * recv [x=320,y=240] [x=640,y=480]"));
+    ASSERT_EQ(8U, *imageattr.pt);
+    ASSERT_FALSE(imageattr.recvAll);
+    ASSERT_EQ(2U, imageattr.recvSets.size());
+    ASSERT_EQ(1U, imageattr.recvSets[0].xRange.discreteValues.size());
+    ASSERT_EQ(320U, imageattr.recvSets[0].xRange.discreteValues[0]);
+    ASSERT_EQ(1U, imageattr.recvSets[0].yRange.discreteValues.size());
+    ASSERT_EQ(240U, imageattr.recvSets[0].yRange.discreteValues[0]);
+    ASSERT_EQ(1U, imageattr.recvSets[1].xRange.discreteValues.size());
+    ASSERT_EQ(640U, imageattr.recvSets[1].xRange.discreteValues[0]);
+    ASSERT_EQ(1U, imageattr.recvSets[1].yRange.discreteValues.size());
+    ASSERT_EQ(480U, imageattr.recvSets[1].yRange.discreteValues[0]);
+    ASSERT_TRUE(imageattr.sendAll);
+    ASSERT_TRUE(imageattr.sendSets.empty());
+  }
+}
+
+// Malformed a=imageattr values must fail, with the parser stopping at the
+// expected character offset (second ParseInvalid argument): bad payload
+// types, misspelled/duplicated direction keywords, and trailing garbage.
+TEST(NewSdpTestNoFixture, CheckImageattrParseInvalid)
+{
+  ParseInvalid<SdpImageattrAttributeList::Imageattr>("", 0);
+  ParseInvalid<SdpImageattrAttributeList::Imageattr>(" ", 0);
+  ParseInvalid<SdpImageattrAttributeList::Imageattr>("-1", 0);
+  // Payload type out of range (must fit the PT field).
+  ParseInvalid<SdpImageattrAttributeList::Imageattr>("99999 ", 5);
+  ParseInvalid<SdpImageattrAttributeList::Imageattr>("*", 1);
+  ParseInvalid<SdpImageattrAttributeList::Imageattr>("* sen", 5);
+  ParseInvalid<SdpImageattrAttributeList::Imageattr>("* vcer *", 6);
+  ParseInvalid<SdpImageattrAttributeList::Imageattr>("* send x", 7);
+  ParseInvalid<SdpImageattrAttributeList::Imageattr>(
+      "* send [x=640,y=480] [", 22);
+  ParseInvalid<SdpImageattrAttributeList::Imageattr>("* send * sen", 12);
+  ParseInvalid<SdpImageattrAttributeList::Imageattr>("* send * vcer *", 13);
+  // The same direction keyword may not appear twice.
+  ParseInvalid<SdpImageattrAttributeList::Imageattr>("* send * send *", 13);
+  ParseInvalid<SdpImageattrAttributeList::Imageattr>("* recv * recv *", 13);
+  ParseInvalid<SdpImageattrAttributeList::Imageattr>("* send * recv x", 14);
+  ParseInvalid<SdpImageattrAttributeList::Imageattr>(
+      "* send * recv [x=640,y=480] [", 29);
+  // Nothing may follow an explicit set list.
+  ParseInvalid<SdpImageattrAttributeList::Imageattr>(
+      "* send * recv [x=640,y=480] *", 28);
+  ParseInvalid<SdpImageattrAttributeList::Imageattr>(
+      "* send * recv [x=640,y=480] foobajooba", 28);
+}
+
+// XYRange serialization: a min/max range prints as "[min:max]", with an
+// optional step as "[min:step:max]"; discrete values print bare when there
+// is exactly one, or as a comma list in brackets when there are several.
+// Note: discreteValues takes precedence once min/max are cleared to 0.
+TEST(NewSdpTestNoFixture, CheckImageattrXYRangeSerialization)
+{
+  SdpImageattrAttributeList::XYRange range;
+  std::stringstream os;
+
+  range.min = 320;
+  range.max = 640;
+  range.Serialize(os);
+  ASSERT_EQ("[320:640]", os.str());
+  os.str(""); // clear
+
+  range.step = 16;
+  range.Serialize(os);
+  ASSERT_EQ("[320:16:640]", os.str());
+  os.str(""); // clear
+
+  range.min = 0;
+  range.max = 0;
+  range.discreteValues.push_back(320);
+  range.Serialize(os);
+  ASSERT_EQ("320", os.str());
+  os.str("");
+
+  range.discreteValues.push_back(640);
+  range.Serialize(os);
+  ASSERT_EQ("[320,640]", os.str());
+}
+
+// SRange (sample aspect ratio) serialization: floats are printed with four
+// decimal places; a min/max range as "[min-max]", a single discrete value
+// bare, multiple discrete values as a bracketed comma list.
+TEST(NewSdpTestNoFixture, CheckImageattrSRangeSerialization)
+{
+  SdpImageattrAttributeList::SRange range;
+  std::ostringstream os;
+
+  range.min = 0.1f;
+  range.max = 0.9999f;
+  range.Serialize(os);
+  ASSERT_EQ("[0.1000-0.9999]", os.str());
+  os.str("");
+
+  range.min = 0.0f;
+  range.max = 0.0f;
+  range.discreteValues.push_back(0.1f);
+  range.Serialize(os);
+  ASSERT_EQ("0.1000", os.str());
+  os.str("");
+
+  range.discreteValues.push_back(0.5f);
+  range.Serialize(os);
+  ASSERT_EQ("[0.1000,0.5000]", os.str());
+}
+
+// PRange (picture aspect ratio) serialization: always a "[min-max]" range,
+// with four decimal places per bound.
+TEST(NewSdpTestNoFixture, CheckImageattrPRangeSerialization)
+{
+  SdpImageattrAttributeList::PRange range;
+  std::ostringstream os;
+
+  range.min = 0.1f;
+  range.max = 0.9999f;
+  range.Serialize(os);
+  ASSERT_EQ("[0.1000-0.9999]", os.str());
+}
+
+// Set serialization: x and y are always emitted; sar and par are emitted
+// when set, ordered before q; q is printed with two decimal places and is
+// only emitted once it differs from the default (0.5).
+TEST(NewSdpTestNoFixture, CheckImageattrSetSerialization)
+{
+  SdpImageattrAttributeList::Set set;
+  std::ostringstream os;
+
+  set.xRange.discreteValues.push_back(640);
+  set.yRange.discreteValues.push_back(480);
+  set.Serialize(os);
+  ASSERT_EQ("[x=640,y=480]", os.str());
+  os.str("");
+
+  set.qValue = 0.00f;
+  set.Serialize(os);
+  ASSERT_EQ("[x=640,y=480,q=0.00]", os.str());
+  os.str("");
+
+  set.qValue = 0.10f;
+  set.Serialize(os);
+  ASSERT_EQ("[x=640,y=480,q=0.10]", os.str());
+  os.str("");
+
+  set.qValue = 1.00f;
+  set.Serialize(os);
+  ASSERT_EQ("[x=640,y=480,q=1.00]", os.str());
+  os.str("");
+
+  set.sRange.discreteValues.push_back(1.1f);
+  set.Serialize(os);
+  ASSERT_EQ("[x=640,y=480,sar=1.1000,q=1.00]", os.str());
+  os.str("");
+
+  set.pRange.min = 0.9f;
+  set.pRange.max = 1.1f;
+  set.Serialize(os);
+  ASSERT_EQ("[x=640,y=480,sar=1.1000,par=[0.9000-1.1000],q=1.00]", os.str());
+  os.str("");
+}
+
+// Full Imageattr serialization: PT (or '*') first, then "send"/"recv" each
+// followed by '*' or its explicit set list; send is emitted before recv.
+TEST(NewSdpTestNoFixture, CheckImageattrSerialization)
+{
+  SdpImageattrAttributeList::Imageattr imageattr;
+  std::ostringstream os;
+
+  imageattr.sendAll = true;
+  imageattr.pt = Some<uint16_t>(8U);
+  imageattr.Serialize(os);
+  ASSERT_EQ("8 send *", os.str());
+  os.str("");
+
+  // NOTE(review): stray double semicolon below; harmless empty statement,
+  // left as-is.
+  imageattr.pt.reset();;
+  imageattr.Serialize(os);
+  ASSERT_EQ("* send *", os.str());
+  os.str("");
+
+  imageattr.sendAll = false;
+  imageattr.recvAll = true;
+  imageattr.Serialize(os);
+  ASSERT_EQ("* recv *", os.str());
+  os.str("");
+
+  imageattr.sendAll = true;
+  imageattr.Serialize(os);
+  ASSERT_EQ("* send * recv *", os.str());
+  os.str("");
+
+  imageattr.sendAll = false;
+  imageattr.sendSets.push_back(SdpImageattrAttributeList::Set());
+  imageattr.sendSets.back().xRange.discreteValues.push_back(320);
+  imageattr.sendSets.back().yRange.discreteValues.push_back(240);
+  imageattr.Serialize(os);
+  ASSERT_EQ("* send [x=320,y=240] recv *", os.str());
+  os.str("");
+
+  imageattr.sendSets.push_back(SdpImageattrAttributeList::Set());
+  imageattr.sendSets.back().xRange.discreteValues.push_back(640);
+  imageattr.sendSets.back().yRange.discreteValues.push_back(480);
+  imageattr.Serialize(os);
+  ASSERT_EQ("* send [x=320,y=240] [x=640,y=480] recv *", os.str());
+  os.str("");
+
+  imageattr.recvAll = false;
+  imageattr.recvSets.push_back(SdpImageattrAttributeList::Set());
+  imageattr.recvSets.back().xRange.discreteValues.push_back(320);
+  imageattr.recvSets.back().yRange.discreteValues.push_back(240);
+  imageattr.Serialize(os);
+  ASSERT_EQ("* send [x=320,y=240] [x=640,y=480] recv [x=320,y=240]", os.str());
+  os.str("");
+
+  imageattr.recvSets.push_back(SdpImageattrAttributeList::Set());
+  imageattr.recvSets.back().xRange.discreteValues.push_back(640);
+  imageattr.recvSets.back().yRange.discreteValues.push_back(480);
+  imageattr.Serialize(os);
+  ASSERT_EQ(
+      "* send [x=320,y=240] [x=640,y=480] recv [x=320,y=240] [x=640,y=480]",
+      os.str());
+  os.str("");
+}
+
+// A simulcast Version's alternative choices serialize as a comma-separated
+// list, in insertion order.
+TEST(NewSdpTestNoFixture, CheckSimulcastVersionSerialize)
+{
+  std::ostringstream os;
+
+  SdpSimulcastAttribute::Version version;
+  version.choices.push_back("8");
+  version.Serialize(os);
+  ASSERT_EQ("8", os.str());
+  os.str("");
+
+  version.choices.push_back("9");
+  version.Serialize(os);
+  ASSERT_EQ("8,9", os.str());
+  os.str("");
+
+  version.choices.push_back("0");
+  version.Serialize(os);
+  ASSERT_EQ("8,9,0", os.str());
+  os.str("");
+}
+
+// Test helper: parse one simulcast version (comma-separated choices).
+// A ';' sentinel is appended so we can verify the parser stops exactly at
+// the version boundary without consuming the delimiter.
+static SdpSimulcastAttribute::Version
+ParseSimulcastVersion(const std::string& input)
+{
+  std::istringstream is(input + ";");
+  std::string error;
+  SdpSimulcastAttribute::Version version;
+  EXPECT_TRUE(version.Parse(is, &error)) << error;
+  EXPECT_EQ(';', is.get());
+  EXPECT_EQ(EOF, is.get());
+  return version;
+}
+
+// Valid simulcast versions: a single choice and a comma-separated pair.
+TEST(NewSdpTestNoFixture, CheckSimulcastVersionValidParse)
+{
+  {
+    SdpSimulcastAttribute::Version version(
+        ParseSimulcastVersion("1"));
+    ASSERT_EQ(1U, version.choices.size());
+    ASSERT_EQ("1", version.choices[0]);
+  }
+
+  {
+    SdpSimulcastAttribute::Version version(
+        ParseSimulcastVersion("1,2"));
+    ASSERT_EQ(2U, version.choices.size());
+    ASSERT_EQ("1", version.choices[0]);
+    ASSERT_EQ("2", version.choices[1]);
+  }
+}
+
+// Invalid simulcast versions: empty input, bare delimiters, and a trailing
+// comma with no following choice. Second argument is the expected stop
+// offset of the parser.
+TEST(NewSdpTestNoFixture, CheckSimulcastVersionInvalidParse)
+{
+  ParseInvalid<SdpSimulcastAttribute::Version>("", 0);
+  ParseInvalid<SdpSimulcastAttribute::Version>(",", 0);
+  ParseInvalid<SdpSimulcastAttribute::Version>(";", 0);
+  ParseInvalid<SdpSimulcastAttribute::Version>(" ", 0);
+  ParseInvalid<SdpSimulcastAttribute::Version>("8,", 2);
+  ParseInvalid<SdpSimulcastAttribute::Version>("8, ", 2);
+  ParseInvalid<SdpSimulcastAttribute::Version>("8,,", 2);
+  ParseInvalid<SdpSimulcastAttribute::Version>("8,;", 2);
+}
+
+// Versions (a list of Version) serializes as "pt=" or "rid=" followed by
+// ';'-separated versions. A version with no choices is skipped entirely.
+TEST(NewSdpTestNoFixture, CheckSimulcastVersionsSerialize)
+{
+  std::ostringstream os;
+
+  SdpSimulcastAttribute::Versions versions;
+  versions.type = SdpSimulcastAttribute::Versions::kPt;
+  versions.push_back(SdpSimulcastAttribute::Version());
+  versions.back().choices.push_back("8");
+  versions.Serialize(os);
+  ASSERT_EQ("pt=8", os.str());
+  os.str("");
+
+  versions.type = SdpSimulcastAttribute::Versions::kRid;
+  versions.Serialize(os);
+  ASSERT_EQ("rid=8", os.str());
+  os.str("");
+
+  // An empty Version contributes nothing to the output.
+  versions.push_back(SdpSimulcastAttribute::Version());
+  versions.Serialize(os);
+  ASSERT_EQ("rid=8", os.str());
+  os.str("");
+
+  versions.back().choices.push_back("9");
+  versions.Serialize(os);
+  ASSERT_EQ("rid=8;9", os.str());
+  os.str("");
+
+  versions.push_back(SdpSimulcastAttribute::Version());
+  versions.back().choices.push_back("0");
+  versions.Serialize(os);
+  ASSERT_EQ("rid=8;9;0", os.str());
+  os.str("");
+}
+
+// Test helper: parse a simulcast version list ("pt=..." / "rid=...").
+// A space sentinel is appended so we can verify the parser stops exactly
+// at the end of the list without consuming the separator.
+static SdpSimulcastAttribute::Versions
+ParseSimulcastVersions(const std::string& input)
+{
+  std::istringstream is(input + " ");
+  std::string error;
+  SdpSimulcastAttribute::Versions list;
+  EXPECT_TRUE(list.Parse(is, &error)) << error;
+  EXPECT_EQ(' ', is.get());
+  EXPECT_EQ(EOF, is.get());
+  return list;
+}
+
+// Valid version lists: "pt="/"rid=" prefix selects the type; ',' separates
+// alternatives within a version, ';' separates versions.
+TEST(NewSdpTestNoFixture, CheckSimulcastVersionsValidParse)
+{
+  {
+    SdpSimulcastAttribute::Versions versions(
+        ParseSimulcastVersions("pt=8"));
+    ASSERT_EQ(1U, versions.size());
+    ASSERT_EQ(SdpSimulcastAttribute::Versions::kPt, versions.type);
+    ASSERT_EQ(1U, versions[0].choices.size());
+    ASSERT_EQ("8", versions[0].choices[0]);
+  }
+
+  {
+    SdpSimulcastAttribute::Versions versions(
+        ParseSimulcastVersions("rid=8"));
+    ASSERT_EQ(1U, versions.size());
+    ASSERT_EQ(SdpSimulcastAttribute::Versions::kRid, versions.type);
+    ASSERT_EQ(1U, versions[0].choices.size());
+    ASSERT_EQ("8", versions[0].choices[0]);
+  }
+
+  {
+    SdpSimulcastAttribute::Versions versions(
+        ParseSimulcastVersions("pt=8,9"));
+    ASSERT_EQ(1U, versions.size());
+    ASSERT_EQ(2U, versions[0].choices.size());
+    ASSERT_EQ("8", versions[0].choices[0]);
+    ASSERT_EQ("9", versions[0].choices[1]);
+  }
+
+  {
+    SdpSimulcastAttribute::Versions versions(
+        ParseSimulcastVersions("pt=8,9;10"));
+    ASSERT_EQ(2U, versions.size());
+    ASSERT_EQ(2U, versions[0].choices.size());
+    ASSERT_EQ("8", versions[0].choices[0]);
+    ASSERT_EQ("9", versions[0].choices[1]);
+    ASSERT_EQ(1U, versions[1].choices.size());
+    ASSERT_EQ("10", versions[1].choices[0]);
+  }
+}
+
+// Invalid version lists: missing/unknown prefix, out-of-range or
+// non-numeric payload types, and dangling ';' separators. Second argument
+// is the expected stop offset of the parser.
+TEST(NewSdpTestNoFixture, CheckSimulcastVersionsInvalidParse)
+{
+  ParseInvalid<SdpSimulcastAttribute::Versions>("", 0);
+  ParseInvalid<SdpSimulcastAttribute::Versions>("x", 1);
+  ParseInvalid<SdpSimulcastAttribute::Versions>(";", 1);
+  ParseInvalid<SdpSimulcastAttribute::Versions>("8", 1);
+  ParseInvalid<SdpSimulcastAttribute::Versions>("foo=", 4);
+  ParseInvalid<SdpSimulcastAttribute::Versions>("foo=8", 4);
+  ParseInvalid<SdpSimulcastAttribute::Versions>("pt=9999", 7);
+  ParseInvalid<SdpSimulcastAttribute::Versions>("pt=-1", 5);
+  ParseInvalid<SdpSimulcastAttribute::Versions>("pt=x", 4);
+  ParseInvalid<SdpSimulcastAttribute::Versions>("pt=8;", 5);
+  ParseInvalid<SdpSimulcastAttribute::Versions>("pt=8;x", 6);
+  ParseInvalid<SdpSimulcastAttribute::Versions>("pt=8;;", 5);
+}
+
+// Full a=simulcast line serialization: "send" precedes "recv" and each
+// direction is only emitted when it has versions. Note sendVersions keeps
+// its default kRid type here, hence "rid=9" vs. the explicit "pt=8".
+TEST(NewSdpTestNoFixture, CheckSimulcastSerialize)
+{
+  std::ostringstream os;
+
+  SdpSimulcastAttribute simulcast;
+  simulcast.recvVersions.type = SdpSimulcastAttribute::Versions::kPt;
+  simulcast.recvVersions.push_back(SdpSimulcastAttribute::Version());
+  simulcast.recvVersions.back().choices.push_back("8");
+  simulcast.Serialize(os);
+  ASSERT_EQ("a=simulcast: recv pt=8" CRLF, os.str());
+  os.str("");
+
+  simulcast.sendVersions.push_back(SdpSimulcastAttribute::Version());
+  simulcast.sendVersions.back().choices.push_back("9");
+  simulcast.Serialize(os);
+  ASSERT_EQ("a=simulcast: send rid=9 recv pt=8" CRLF, os.str());
+  os.str("");
+}
+
+// Test helper: parse a complete a=simulcast attribute value, asserting
+// that the parse succeeds and consumes the whole input stream.
+static SdpSimulcastAttribute
+ParseSimulcast(const std::string& input)
+{
+  std::istringstream is(input);
+  std::string error;
+  SdpSimulcastAttribute simulcast;
+  EXPECT_TRUE(simulcast.Parse(is, &error)) << error;
+  EXPECT_TRUE(is.eof());
+  return simulcast;
+}
+
+// Valid a=simulcast values: single-direction forms (case-insensitive
+// send/recv keyword), and a combined send+recv value with multiple
+// versions and alternatives per version.
+TEST(NewSdpTestNoFixture, CheckSimulcastValidParse)
+{
+  {
+    SdpSimulcastAttribute simulcast(ParseSimulcast(" send pt=8"));
+    ASSERT_EQ(1U, simulcast.sendVersions.size());
+    ASSERT_EQ(SdpSimulcastAttribute::Versions::kPt,
+              simulcast.sendVersions.type);
+    ASSERT_EQ(1U, simulcast.sendVersions[0].choices.size());
+    ASSERT_EQ("8", simulcast.sendVersions[0].choices[0]);
+    ASSERT_EQ(0U, simulcast.recvVersions.size());
+  }
+
+  // Direction keyword is case-insensitive.
+  {
+    SdpSimulcastAttribute simulcast(ParseSimulcast(" SEND pt=8"));
+    ASSERT_EQ(1U, simulcast.sendVersions.size());
+    ASSERT_EQ(SdpSimulcastAttribute::Versions::kPt,
+              simulcast.sendVersions.type);
+    ASSERT_EQ(1U, simulcast.sendVersions[0].choices.size());
+    ASSERT_EQ("8", simulcast.sendVersions[0].choices[0]);
+    ASSERT_EQ(0U, simulcast.recvVersions.size());
+  }
+
+  {
+    SdpSimulcastAttribute simulcast(ParseSimulcast(" recv pt=8"));
+    ASSERT_EQ(1U, simulcast.recvVersions.size());
+    ASSERT_EQ(SdpSimulcastAttribute::Versions::kPt,
+              simulcast.recvVersions.type);
+    ASSERT_EQ(1U, simulcast.recvVersions[0].choices.size());
+    ASSERT_EQ("8", simulcast.recvVersions[0].choices[0]);
+    ASSERT_EQ(0U, simulcast.sendVersions.size());
+  }
+
+  // Both directions, multiple versions, comma-separated alternatives.
+  {
+    SdpSimulcastAttribute simulcast(
+        ParseSimulcast(
+          " send pt=8,9;101;97,98 recv pt=101,120;97"));
+    ASSERT_EQ(3U, simulcast.sendVersions.size());
+    ASSERT_EQ(SdpSimulcastAttribute::Versions::kPt,
+              simulcast.sendVersions.type);
+    ASSERT_EQ(2U, simulcast.sendVersions[0].choices.size());
+    ASSERT_EQ("8", simulcast.sendVersions[0].choices[0]);
+    ASSERT_EQ("9", simulcast.sendVersions[0].choices[1]);
+    ASSERT_EQ(1U, simulcast.sendVersions[1].choices.size());
+    ASSERT_EQ("101", simulcast.sendVersions[1].choices[0]);
+    ASSERT_EQ(2U, simulcast.sendVersions[2].choices.size());
+    ASSERT_EQ("97", simulcast.sendVersions[2].choices[0]);
+    ASSERT_EQ("98", simulcast.sendVersions[2].choices[1]);
+
+    ASSERT_EQ(2U, simulcast.recvVersions.size());
+    ASSERT_EQ(SdpSimulcastAttribute::Versions::kPt,
+              simulcast.recvVersions.type);
+    ASSERT_EQ(2U, simulcast.recvVersions[0].choices.size());
+    ASSERT_EQ("101", simulcast.recvVersions[0].choices[0]);
+    ASSERT_EQ("120", simulcast.recvVersions[0].choices[1]);
+    ASSERT_EQ(1U, simulcast.recvVersions[1].choices.size());
+    ASSERT_EQ("97", simulcast.recvVersions[1].choices[0]);
+  }
+}
+
+// Invalid a=simulcast values: empty, unknown keyword, bad version list,
+// and repeated direction keywords. Second argument is the expected stop
+// offset of the parser.
+TEST(NewSdpTestNoFixture, CheckSimulcastInvalidParse)
+{
+  ParseInvalid<SdpSimulcastAttribute>("", 0);
+  ParseInvalid<SdpSimulcastAttribute>(" ", 1);
+  ParseInvalid<SdpSimulcastAttribute>("vcer ", 4);
+  ParseInvalid<SdpSimulcastAttribute>(" send x", 7);
+  ParseInvalid<SdpSimulcastAttribute>(" recv x", 7);
+  ParseInvalid<SdpSimulcastAttribute>(" send pt=8 send ", 15);
+  ParseInvalid<SdpSimulcastAttribute>(" recv pt=8 recv ", 15);
+}
+
+// Test helper: parse a complete a=rid attribute value, asserting that the
+// parse succeeds and consumes the whole input stream.
+static SdpRidAttributeList::Rid
+ParseRid(const std::string& input)
+{
+  std::istringstream is(input);
+  std::string error;
+  SdpRidAttributeList::Rid rid;
+  EXPECT_TRUE(rid.Parse(is, &error)) << error;
+  EXPECT_TRUE(is.eof());
+  return rid;
+}
+
+TEST(NewSdpTestNoFixture, CheckRidValidParse)
+{
+ {
+ SdpRidAttributeList::Rid rid(ParseRid("1 send"));
+ ASSERT_EQ("1", rid.id);
+ ASSERT_EQ(sdp::kSend, rid.direction);
+ ASSERT_EQ(0U, rid.formats.size());
+ ASSERT_EQ(0U, rid.constraints.maxWidth);
+ ASSERT_EQ(0U, rid.constraints.maxHeight);
+ ASSERT_EQ(0U, rid.constraints.maxFps);
+ ASSERT_EQ(0U, rid.constraints.maxFs);
+ ASSERT_EQ(0U, rid.constraints.maxBr);
+ ASSERT_EQ(0U, rid.constraints.maxPps);
+ ASSERT_EQ(0U, rid.dependIds.size());
+ }
+
+ {
+ SdpRidAttributeList::Rid rid(ParseRid("1 send pt=96;max-width=800"));
+ ASSERT_EQ("1", rid.id);
+ ASSERT_EQ(sdp::kSend, rid.direction);
+ ASSERT_EQ(1U, rid.formats.size());
+ ASSERT_EQ(96U, rid.formats[0]);
+ ASSERT_EQ(800U, rid.constraints.maxWidth);
+ ASSERT_EQ(0U, rid.constraints.maxHeight);
+ ASSERT_EQ(0U, rid.constraints.maxFps);
+ ASSERT_EQ(0U, rid.constraints.maxFs);
+ ASSERT_EQ(0U, rid.constraints.maxBr);
+ ASSERT_EQ(0U, rid.constraints.maxPps);
+ ASSERT_EQ(0U, rid.dependIds.size());
+ }
+
+ {
+ SdpRidAttributeList::Rid rid(ParseRid("1 send pt=96,97,98;max-width=800"));
+ ASSERT_EQ("1", rid.id);
+ ASSERT_EQ(sdp::kSend, rid.direction);
+ ASSERT_EQ(3U, rid.formats.size());
+ ASSERT_EQ(96U, rid.formats[0]);
+ ASSERT_EQ(97U, rid.formats[1]);
+ ASSERT_EQ(98U, rid.formats[2]);
+ ASSERT_EQ(800U, rid.constraints.maxWidth);
+ ASSERT_EQ(0U, rid.constraints.maxHeight);
+ ASSERT_EQ(0U, rid.constraints.maxFps);
+ ASSERT_EQ(0U, rid.constraints.maxFs);
+ ASSERT_EQ(0U, rid.constraints.maxBr);
+ ASSERT_EQ(0U, rid.constraints.maxPps);
+ ASSERT_EQ(0U, rid.dependIds.size());
+ }
+
+ {
+ SdpRidAttributeList::Rid rid(
+ ParseRid("0123456789az-_ recv max-width=800"));
+ ASSERT_EQ("0123456789az-_", rid.id);
+ ASSERT_EQ(sdp::kRecv, rid.direction);
+ ASSERT_EQ(0U, rid.formats.size());
+ ASSERT_EQ(800U, rid.constraints.maxWidth);
+ ASSERT_EQ(0U, rid.constraints.maxHeight);
+ ASSERT_EQ(0U, rid.constraints.maxFps);
+ ASSERT_EQ(0U, rid.constraints.maxFs);
+ ASSERT_EQ(0U, rid.constraints.maxBr);
+ ASSERT_EQ(0U, rid.constraints.maxPps);
+ ASSERT_EQ(0U, rid.dependIds.size());
+ }
+
+ {
+ SdpRidAttributeList::Rid rid(
+ ParseRid("foo send"));
+ ASSERT_EQ(0U, rid.formats.size());
+ ASSERT_EQ(0U, rid.constraints.maxWidth);
+ ASSERT_EQ(0U, rid.constraints.maxHeight);
+ ASSERT_EQ(0U, rid.constraints.maxFps);
+ ASSERT_EQ(0U, rid.constraints.maxFs);
+ ASSERT_EQ(0U, rid.constraints.maxBr);
+ ASSERT_EQ(0U, rid.constraints.maxPps);
+ ASSERT_EQ(0U, rid.dependIds.size());
+ }
+
+ {
+ SdpRidAttributeList::Rid rid(
+ ParseRid("foo send pt=96"));
+ ASSERT_EQ(1U, rid.formats.size());
+ ASSERT_EQ(96U, rid.formats[0]);
+ ASSERT_EQ(0U, rid.constraints.maxWidth);
+ ASSERT_EQ(0U, rid.constraints.maxHeight);
+ ASSERT_EQ(0U, rid.constraints.maxFps);
+ ASSERT_EQ(0U, rid.constraints.maxFs);
+ ASSERT_EQ(0U, rid.constraints.maxBr);
+ ASSERT_EQ(0U, rid.constraints.maxPps);
+ ASSERT_EQ(0U, rid.dependIds.size());
+ }
+
+ // This is not technically permitted by the BNF, but the parse code is simpler
+ // if we allow it. If we decide to stop allowing this, this will need to be
+ // converted to an invalid parse test-case.
+ {
+ SdpRidAttributeList::Rid rid(
+ ParseRid("foo send max-br=30000;pt=96"));
+ ASSERT_EQ(1U, rid.formats.size());
+ ASSERT_EQ(96U, rid.formats[0]);
+ ASSERT_EQ(0U, rid.constraints.maxWidth);
+ ASSERT_EQ(0U, rid.constraints.maxHeight);
+ ASSERT_EQ(0U, rid.constraints.maxFps);
+ ASSERT_EQ(0U, rid.constraints.maxFs);
+ ASSERT_EQ(30000U, rid.constraints.maxBr);
+ ASSERT_EQ(0U, rid.constraints.maxPps);
+ ASSERT_EQ(0U, rid.dependIds.size());
+ }
+
+ {
+ SdpRidAttributeList::Rid rid(
+ ParseRid("foo send pt=96,97,98"));
+ ASSERT_EQ(3U, rid.formats.size());
+ ASSERT_EQ(96U, rid.formats[0]);
+ ASSERT_EQ(97U, rid.formats[1]);
+ ASSERT_EQ(98U, rid.formats[2]);
+ ASSERT_EQ(0U, rid.constraints.maxWidth);
+ ASSERT_EQ(0U, rid.constraints.maxHeight);
+ ASSERT_EQ(0U, rid.constraints.maxFps);
+ ASSERT_EQ(0U, rid.constraints.maxFs);
+ ASSERT_EQ(0U, rid.constraints.maxBr);
+ ASSERT_EQ(0U, rid.constraints.maxPps);
+ ASSERT_EQ(0U, rid.dependIds.size());
+ }
+
+ {
+ SdpRidAttributeList::Rid rid(
+ ParseRid("foo send max-width=800"));
+ ASSERT_EQ(0U, rid.formats.size());
+ ASSERT_EQ(800U, rid.constraints.maxWidth);
+ ASSERT_EQ(0U, rid.constraints.maxHeight);
+ ASSERT_EQ(0U, rid.constraints.maxFps);
+ ASSERT_EQ(0U, rid.constraints.maxFs);
+ ASSERT_EQ(0U, rid.constraints.maxBr);
+ ASSERT_EQ(0U, rid.constraints.maxPps);
+ ASSERT_EQ(0U, rid.dependIds.size());
+ }
+
+ {
+ SdpRidAttributeList::Rid rid(
+ ParseRid("foo send max-height=640"));
+ ASSERT_EQ(0U, rid.formats.size());
+ ASSERT_EQ(0U, rid.constraints.maxWidth);
+ ASSERT_EQ(640U, rid.constraints.maxHeight);
+ ASSERT_EQ(0U, rid.constraints.maxFps);
+ ASSERT_EQ(0U, rid.constraints.maxFs);
+ ASSERT_EQ(0U, rid.constraints.maxBr);
+ ASSERT_EQ(0U, rid.constraints.maxPps);
+ ASSERT_EQ(0U, rid.dependIds.size());
+ }
+
+ {
+ SdpRidAttributeList::Rid rid(
+ ParseRid("foo send max-fps=30"));
+ ASSERT_EQ(0U, rid.formats.size());
+ ASSERT_EQ(0U, rid.constraints.maxWidth);
+ ASSERT_EQ(0U, rid.constraints.maxHeight);
+ ASSERT_EQ(30U, rid.constraints.maxFps);
+ ASSERT_EQ(0U, rid.constraints.maxFs);
+ ASSERT_EQ(0U, rid.constraints.maxBr);
+ ASSERT_EQ(0U, rid.constraints.maxPps);
+ ASSERT_EQ(0U, rid.dependIds.size());
+ }
+
+ {
+ SdpRidAttributeList::Rid rid(
+ ParseRid("foo send max-fs=3600"));
+ ASSERT_EQ(0U, rid.formats.size());
+ ASSERT_EQ(0U, rid.constraints.maxWidth);
+ ASSERT_EQ(0U, rid.constraints.maxHeight);
+ ASSERT_EQ(0U, rid.constraints.maxFps);
+ ASSERT_EQ(3600U, rid.constraints.maxFs);
+ ASSERT_EQ(0U, rid.constraints.maxBr);
+ ASSERT_EQ(0U, rid.constraints.maxPps);
+ ASSERT_EQ(0U, rid.dependIds.size());
+ }
+
+ {
+ SdpRidAttributeList::Rid rid(
+ ParseRid("foo send max-br=30000"));
+ ASSERT_EQ(0U, rid.formats.size());
+ ASSERT_EQ(0U, rid.constraints.maxWidth);
+ ASSERT_EQ(0U, rid.constraints.maxHeight);
+ ASSERT_EQ(0U, rid.constraints.maxFps);
+ ASSERT_EQ(0U, rid.constraints.maxFs);
+ ASSERT_EQ(30000U, rid.constraints.maxBr);
+ ASSERT_EQ(0U, rid.constraints.maxPps);
+ ASSERT_EQ(0U, rid.dependIds.size());
+ }
+
+ {
+ SdpRidAttributeList::Rid rid(
+ ParseRid("foo send max-pps=9216000"));
+ ASSERT_EQ(0U, rid.formats.size());
+ ASSERT_EQ(0U, rid.constraints.maxWidth);
+ ASSERT_EQ(0U, rid.constraints.maxHeight);
+ ASSERT_EQ(0U, rid.constraints.maxFps);
+ ASSERT_EQ(0U, rid.constraints.maxFs);
+ ASSERT_EQ(0U, rid.constraints.maxBr);
+ ASSERT_EQ(9216000U, rid.constraints.maxPps);
+ ASSERT_EQ(0U, rid.dependIds.size());
+ }
+
+ {
+ SdpRidAttributeList::Rid rid(
+ ParseRid("foo send depend=foo"));
+ ASSERT_EQ(0U, rid.formats.size());
+ ASSERT_EQ(0U, rid.constraints.maxWidth);
+ ASSERT_EQ(0U, rid.constraints.maxHeight);
+ ASSERT_EQ(0U, rid.constraints.maxFps);
+ ASSERT_EQ(0U, rid.constraints.maxFs);
+ ASSERT_EQ(0U, rid.constraints.maxBr);
+ ASSERT_EQ(0U, rid.constraints.maxPps);
+ ASSERT_EQ(1U, rid.dependIds.size());
+ ASSERT_EQ("foo", rid.dependIds[0]);
+ }
+
+ {
+ SdpRidAttributeList::Rid rid(
+ ParseRid("foo send max-foo=20"));
+ ASSERT_EQ(0U, rid.formats.size());
+ ASSERT_EQ(0U, rid.constraints.maxWidth);
+ ASSERT_EQ(0U, rid.constraints.maxHeight);
+ ASSERT_EQ(0U, rid.constraints.maxFps);
+ ASSERT_EQ(0U, rid.constraints.maxFs);
+ ASSERT_EQ(0U, rid.constraints.maxBr);
+ ASSERT_EQ(0U, rid.constraints.maxPps);
+ ASSERT_EQ(0U, rid.dependIds.size());
+ }
+
+ {
+ SdpRidAttributeList::Rid rid(
+ ParseRid("foo send depend=foo,bar"));
+ ASSERT_EQ(0U, rid.formats.size());
+ ASSERT_EQ(0U, rid.constraints.maxWidth);
+ ASSERT_EQ(0U, rid.constraints.maxHeight);
+ ASSERT_EQ(0U, rid.constraints.maxFps);
+ ASSERT_EQ(0U, rid.constraints.maxFs);
+ ASSERT_EQ(0U, rid.constraints.maxBr);
+ ASSERT_EQ(0U, rid.constraints.maxPps);
+ ASSERT_EQ(2U, rid.dependIds.size());
+ ASSERT_EQ("foo", rid.dependIds[0]);
+ ASSERT_EQ("bar", rid.dependIds[1]);
+ }
+
+ {
+ SdpRidAttributeList::Rid rid(
+ ParseRid("foo send max-width=800;max-height=600"));
+ ASSERT_EQ(0U, rid.formats.size());
+ ASSERT_EQ(800U, rid.constraints.maxWidth);
+ ASSERT_EQ(600U, rid.constraints.maxHeight);
+ ASSERT_EQ(0U, rid.constraints.maxFps);
+ ASSERT_EQ(0U, rid.constraints.maxFs);
+ ASSERT_EQ(0U, rid.constraints.maxBr);
+ ASSERT_EQ(0U, rid.constraints.maxPps);
+ ASSERT_EQ(0U, rid.dependIds.size());
+ }
+
+ {
+ SdpRidAttributeList::Rid rid(
+ ParseRid("foo send pt=96,97;max-width=800;max-height=600"));
+ ASSERT_EQ(2U, rid.formats.size());
+ ASSERT_EQ(96U, rid.formats[0]);
+ ASSERT_EQ(97U, rid.formats[1]);
+ ASSERT_EQ(800U, rid.constraints.maxWidth);
+ ASSERT_EQ(600U, rid.constraints.maxHeight);
+ ASSERT_EQ(0U, rid.constraints.maxFps);
+ ASSERT_EQ(0U, rid.constraints.maxFs);
+ ASSERT_EQ(0U, rid.constraints.maxBr);
+ ASSERT_EQ(0U, rid.constraints.maxPps);
+ ASSERT_EQ(0U, rid.dependIds.size());
+ }
+
+ {
+ SdpRidAttributeList::Rid rid(
+ ParseRid("foo send depend=foo,bar;max-width=800;max-height=600"));
+ ASSERT_EQ(0U, rid.formats.size());
+ ASSERT_EQ(800U, rid.constraints.maxWidth);
+ ASSERT_EQ(600U, rid.constraints.maxHeight);
+ ASSERT_EQ(0U, rid.constraints.maxFps);
+ ASSERT_EQ(0U, rid.constraints.maxFs);
+ ASSERT_EQ(0U, rid.constraints.maxBr);
+ ASSERT_EQ(0U, rid.constraints.maxPps);
+ ASSERT_EQ(2U, rid.dependIds.size());
+ ASSERT_EQ("foo", rid.dependIds[0]);
+ ASSERT_EQ("bar", rid.dependIds[1]);
+ }
+
+ {
+ SdpRidAttributeList::Rid rid(
+ ParseRid("foo send max-foo=20;max-width=800;max-height=600"));
+ ASSERT_EQ(0U, rid.formats.size());
+ ASSERT_EQ(800U, rid.constraints.maxWidth);
+ ASSERT_EQ(600U, rid.constraints.maxHeight);
+ ASSERT_EQ(0U, rid.constraints.maxFps);
+ ASSERT_EQ(0U, rid.constraints.maxFs);
+ ASSERT_EQ(0U, rid.constraints.maxBr);
+ ASSERT_EQ(0U, rid.constraints.maxPps);
+ ASSERT_EQ(0U, rid.dependIds.size());
+ }
+}
+
// Verifies that malformed a=rid attribute values are rejected by the parser
// and that the reported error offset (second argument to ParseInvalid) points
// at the character where parsing failed.
TEST(NewSdpTestNoFixture, CheckRidInvalidParse)
{
  // Empty / whitespace-only / missing-direction inputs.
  ParseInvalid<SdpRidAttributeList::Rid>("", 0);
  ParseInvalid<SdpRidAttributeList::Rid>(" ", 0);
  ParseInvalid<SdpRidAttributeList::Rid>("foo", 3);
  ParseInvalid<SdpRidAttributeList::Rid>("foo ", 4);
  ParseInvalid<SdpRidAttributeList::Rid>("foo  ", 5);
  ParseInvalid<SdpRidAttributeList::Rid>("foo bar", 7);
  ParseInvalid<SdpRidAttributeList::Rid>("foo recv ", 9);
  ParseInvalid<SdpRidAttributeList::Rid>("foo recv pt=", 12);
  // NOTE(review): this whitespace-only case looks like a duplicate of the
  // " " case above — confirm whether a distinct input was intended.
  ParseInvalid<SdpRidAttributeList::Rid>(" ", 0);
  // Malformed pt= constraints: missing '=', empty, non-numeric, negative,
  // trailing comma, and out-of-range payload type (196 > 127).
  ParseInvalid<SdpRidAttributeList::Rid>("foo send pt", 11);
  ParseInvalid<SdpRidAttributeList::Rid>("foo send pt=", 12);
  ParseInvalid<SdpRidAttributeList::Rid>("foo send pt=x", 12);
  ParseInvalid<SdpRidAttributeList::Rid>("foo send pt=-1", 12);
  ParseInvalid<SdpRidAttributeList::Rid>("foo send pt=96,", 15);
  ParseInvalid<SdpRidAttributeList::Rid>("foo send pt=196", 15);
  // Malformed max-width constraints and dangling separators.
  ParseInvalid<SdpRidAttributeList::Rid>("foo send max-width", 18);
  ParseInvalid<SdpRidAttributeList::Rid>("foo send max-width=", 19);
  ParseInvalid<SdpRidAttributeList::Rid>("foo send max-width=x", 19);
  ParseInvalid<SdpRidAttributeList::Rid>("foo send max-width=-1", 19);
  ParseInvalid<SdpRidAttributeList::Rid>("foo send max-width=800;", 23);
  ParseInvalid<SdpRidAttributeList::Rid>("foo send max-width=800; ", 24);
  // Malformed depend= lists: empty, leading comma, trailing comma.
  ParseInvalid<SdpRidAttributeList::Rid>("foo send depend=",16);
  ParseInvalid<SdpRidAttributeList::Rid>("foo send depend=,", 16);
  ParseInvalid<SdpRidAttributeList::Rid>("foo send depend=1,", 18);
}
+
+TEST(NewSdpTestNoFixture, CheckRidSerialize)
+{
+ {
+ SdpRidAttributeList::Rid rid;
+ rid.id = "foo";
+ rid.direction = sdp::kSend;
+ std::ostringstream os;
+ rid.Serialize(os);
+ ASSERT_EQ("foo send", os.str());
+ }
+
+ {
+ SdpRidAttributeList::Rid rid;
+ rid.id = "foo";
+ rid.direction = sdp::kSend;
+ std::ostringstream os;
+ rid.Serialize(os);
+ ASSERT_EQ("foo send", os.str());
+ }
+
+ {
+ SdpRidAttributeList::Rid rid;
+ rid.id = "foo";
+ rid.direction = sdp::kSend;
+ rid.formats.push_back(96);
+ std::ostringstream os;
+ rid.Serialize(os);
+ ASSERT_EQ("foo send pt=96", os.str());
+ }
+
+ {
+ SdpRidAttributeList::Rid rid;
+ rid.id = "foo";
+ rid.direction = sdp::kSend;
+ rid.formats.push_back(96);
+ rid.formats.push_back(97);
+ std::ostringstream os;
+ rid.Serialize(os);
+ ASSERT_EQ("foo send pt=96,97", os.str());
+ }
+
+ {
+ SdpRidAttributeList::Rid rid;
+ rid.id = "foo";
+ rid.direction = sdp::kSend;
+ rid.constraints.maxWidth = 800;
+ std::ostringstream os;
+ rid.Serialize(os);
+ ASSERT_EQ("foo send max-width=800", os.str());
+ }
+
+ {
+ SdpRidAttributeList::Rid rid;
+ rid.id = "foo";
+ rid.direction = sdp::kSend;
+ rid.constraints.maxHeight = 600;
+ std::ostringstream os;
+ rid.Serialize(os);
+ ASSERT_EQ("foo send max-height=600", os.str());
+ }
+
+ {
+ SdpRidAttributeList::Rid rid;
+ rid.id = "foo";
+ rid.direction = sdp::kSend;
+ rid.constraints.maxFps = 30;
+ std::ostringstream os;
+ rid.Serialize(os);
+ ASSERT_EQ("foo send max-fps=30", os.str());
+ }
+
+ {
+ SdpRidAttributeList::Rid rid;
+ rid.id = "foo";
+ rid.direction = sdp::kSend;
+ rid.constraints.maxFs = 3600;
+ std::ostringstream os;
+ rid.Serialize(os);
+ ASSERT_EQ("foo send max-fs=3600", os.str());
+ }
+
+ {
+ SdpRidAttributeList::Rid rid;
+ rid.id = "foo";
+ rid.direction = sdp::kSend;
+ rid.constraints.maxBr = 30000;
+ std::ostringstream os;
+ rid.Serialize(os);
+ ASSERT_EQ("foo send max-br=30000", os.str());
+ }
+
+ {
+ SdpRidAttributeList::Rid rid;
+ rid.id = "foo";
+ rid.direction = sdp::kSend;
+ rid.constraints.maxPps = 9216000;
+ std::ostringstream os;
+ rid.Serialize(os);
+ ASSERT_EQ("foo send max-pps=9216000", os.str());
+ }
+
+ {
+ SdpRidAttributeList::Rid rid;
+ rid.id = "foo";
+ rid.direction = sdp::kSend;
+ rid.dependIds.push_back("foo");
+ std::ostringstream os;
+ rid.Serialize(os);
+ ASSERT_EQ("foo send depend=foo", os.str());
+ }
+
+ {
+ SdpRidAttributeList::Rid rid;
+ rid.id = "foo";
+ rid.direction = sdp::kSend;
+ rid.dependIds.push_back("foo");
+ rid.dependIds.push_back("bar");
+ std::ostringstream os;
+ rid.Serialize(os);
+ ASSERT_EQ("foo send depend=foo,bar", os.str());
+ }
+
+ {
+ SdpRidAttributeList::Rid rid;
+ rid.id = "foo";
+ rid.direction = sdp::kSend;
+ rid.formats.push_back(96);
+ rid.constraints.maxBr = 30000;
+ std::ostringstream os;
+ rid.Serialize(os);
+ ASSERT_EQ("foo send pt=96;max-br=30000", os.str());
+ }
+}
+
+} // End namespace test.
+
// Test entry point: brings up XPCOM and NSS, runs all gtests, then tears
// down global test state in reverse order of setup.
int main(int argc, char **argv) {
  // RAII: XPCOM stays alive for the whole run and shuts down on scope exit.
  ScopedXPCOM xpcom("sdp_unittests");

  // Global transport-test harness; deleted after the tests complete.
  test_utils = new MtransportTestUtils();
  // NSS init (presumably needed for DTLS fingerprint/crypto paths exercised
  // by the SDP code — TODO confirm); return values are intentionally ignored.
  NSS_NoDB_Init(nullptr);
  NSS_SetDomesticPolicy();

  ::testing::InitGoogleTest(&argc, argv);
  int result = RUN_ALL_TESTS();

  // Tear down the PeerConnection singleton before deleting the harness.
  PeerConnectionCtx::Destroy();
  delete test_utils;

  return result;
}
diff --git a/media/webrtc/signaling/test/signaling_unittests.cpp b/media/webrtc/signaling/test/signaling_unittests.cpp
new file mode 100644
index 000000000..27d4750c7
--- /dev/null
+++ b/media/webrtc/signaling/test/signaling_unittests.cpp
@@ -0,0 +1,4851 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <iostream>
+#include <map>
+#include <algorithm>
+#include <string>
+
+#include "base/basictypes.h"
+#include "logging.h"
+
+#define GTEST_HAS_RTTI 0
+#include "gtest/gtest.h"
+#include "gtest_utils.h"
+
+#include "nspr.h"
+#include "nss.h"
+#include "ssl.h"
+#include "prthread.h"
+
+#include "FakePCObserver.h"
+#include "FakeMediaStreams.h"
+#include "FakeMediaStreamsImpl.h"
+#include "FakeLogging.h"
+#include "PeerConnectionImpl.h"
+#include "PeerConnectionCtx.h"
+#include "PeerConnectionMedia.h"
+#include "MediaPipeline.h"
+#include "runnable_utils.h"
+#include "nsServiceManagerUtils.h"
+#include "mozilla/Services.h"
+#include "nsIPrefService.h"
+#include "nsIPrefBranch.h"
+#include "nsIDNSService.h"
+#include "nsQueryObject.h"
+#include "nsWeakReference.h"
+#include "nricectx.h"
+#include "rlogconnector.h"
+#include "mozilla/SyncRunnable.h"
+#include "logging.h"
+#include "stunserver.h"
+#include "stunserver.cpp"
+#ifdef SIGNALING_UNITTEST_STANDALONE
+#include "PeerConnectionImplEnumsBinding.cpp"
+#endif
+
+#include "FakeIPC.h"
+#include "FakeIPC.cpp"
+
+#include "ice_ctx.h"
+#include "ice_peer_ctx.h"
+
+#include "mtransport_test_utils.h"
+#include "gtest_ringbuffer_dumper.h"
// Globals shared by the whole test file.
MtransportTestUtils *test_utils;        // transport harness, owned by main()
nsCOMPtr<nsIThread> gMainThread;        // XPCOM main thread
nsCOMPtr<nsIThread> gGtestThread;       // thread the gtests run on
bool gTestsComplete = false;

// This file only builds against the fake media-stream / fake observer
// shims; fail loudly if the build system didn't define them.
#ifndef USE_FAKE_MEDIA_STREAMS
#error USE_FAKE_MEDIA_STREAMS undefined
#endif
#ifndef USE_FAKE_PCOBSERVER
#error USE_FAKE_PCOBSERVER undefined
#endif

// Default wait (ms) for the *_WAIT gtest helpers.
static int kDefaultTimeout = 10000;

static std::string callerName = "caller";
static std::string calleeName = "callee";

// Builds an STL container from a C array using its static size.
#define ARRAY_TO_STL(container, type, array) \
        (container<type>((array), (array) + PR_ARRAY_SIZE(array)))

#define ARRAY_TO_SET(type, array) ARRAY_TO_STL(std::set, type, array)

// Public STUN server used by default; the "bogus" values are used by tests
// that need a server-reflexive address that will never answer.
std::string g_stun_server_address((char *)"23.21.150.121");
uint16_t g_stun_server_port(3478);
std::string kBogusSrflxAddress((char *)"192.0.2.1");
uint16_t kBogusSrflxPort(1001);
+
+// We can't use webidl bindings here because it uses nsString,
+// so we pass options in using OfferOptions instead
+class OfferOptions : public mozilla::JsepOfferOptions {
+public:
+ void setInt32Option(const char *namePtr, size_t value) {
+ if (!strcmp(namePtr, "OfferToReceiveAudio")) {
+ mOfferToReceiveAudio = mozilla::Some(value);
+ } else if (!strcmp(namePtr, "OfferToReceiveVideo")) {
+ mOfferToReceiveVideo = mozilla::Some(value);
+ }
+ }
+ void setBoolOption(const char* namePtr, bool value) {
+ if (!strcmp(namePtr, "IceRestart")) {
+ mIceRestart = mozilla::Some(value);
+ }
+ }
+private:
+};
+
+using namespace mozilla;
+using namespace mozilla::dom;
+
+// XXX Workaround for bug 998092 to maintain the existing broken semantics
+template<>
+struct nsISupportsWeakReference::COMTypeInfo<nsSupportsWeakReference, void> {
+ static const nsIID kIID;
+};
+//const nsIID nsISupportsWeakReference::COMTypeInfo<nsSupportsWeakReference, void>::kIID = NS_ISUPPORTSWEAKREFERENCE_IID;
+
+namespace test {
+
+class SignalingAgent;
+
// Returns a copy of |s| with |width| spaces inserted at the start of every
// line, i.e. before the first character and after each '\n'. A trailing
// newline does not get a dangling prefix after it.
std::string indent(const std::string &s, int width = 4) {
  const std::string pad(width, ' ');
  std::string result;
  bool atLineStart = true;  // the very first character starts a line
  for (char c : s) {
    if (atLineStart) {
      result += pad;
    }
    result += c;
    atLineStart = (c == '\n');
  }
  return result;
}
+
// Canned audio+video offer used as a remote description in tests. Despite
// the name it carries ice-ufrag/pwd and host candidates; "NoIce" presumably
// refers to no live ICE negotiation being performed — TODO confirm.
// NOTE(review): the video m-line advertises PT 97 but the rtpmap below
// declares PT 120 (VP8) while fmtp uses 97 — looks like deliberate test
// inconsistency or a typo; confirm before relying on video codec matching.
static const std::string strSampleSdpAudioVideoNoIce =
  "v=0\r\n"
  "o=Mozilla-SIPUA 4949 0 IN IP4 10.86.255.143\r\n"
  "s=SIP Call\r\n"
  "t=0 0\r\n"
  "a=ice-ufrag:qkEP\r\n"
  "a=ice-pwd:ed6f9GuHjLcoCN6sC/Eh7fVl\r\n"
  "m=audio 16384 RTP/AVP 0 8 9 101\r\n"
  "c=IN IP4 10.86.255.143\r\n"
  "a=rtpmap:0 PCMU/8000\r\n"
  "a=rtpmap:8 PCMA/8000\r\n"
  "a=rtpmap:9 G722/8000\r\n"
  "a=rtpmap:101 telephone-event/8000\r\n"
  "a=fmtp:101 0-15\r\n"
  "a=sendrecv\r\n"
  "a=candidate:1 1 UDP 2130706431 192.168.2.1 50005 typ host\r\n"
  "a=candidate:2 2 UDP 2130706431 192.168.2.2 50006 typ host\r\n"
  "m=video 1024 RTP/AVP 97\r\n"
  "c=IN IP4 10.86.255.143\r\n"
  "a=rtpmap:120 VP8/90000\r\n"
  "a=fmtp:97 profile-level-id=42E00C\r\n"
  "a=sendrecv\r\n"
  "a=candidate:1 1 UDP 2130706431 192.168.2.3 50007 typ host\r\n"
  "a=candidate:2 2 UDP 2130706431 192.168.2.4 50008 typ host\r\n";

// Single host candidate used by trickle-ICE tests.
static const std::string strSampleCandidate =
  "a=candidate:1 1 UDP 2130706431 192.168.2.1 50005 typ host\r\n";

static const std::string strSampleMid = "sdparta";

static const unsigned short nSamplelevel = 2;

// Audio-only G.711 offer with an SDP fingerprint and setup:active, used to
// exercise answering a non-Firefox offer.
static const std::string strG711SdpOffer =
    "v=0\r\n"
    "o=- 1 1 IN IP4 148.147.200.251\r\n"
    "s=-\r\n"
    "b=AS:64\r\n"
    "t=0 0\r\n"
    "a=fingerprint:sha-256 F3:FA:20:C0:CD:48:C4:5F:02:5F:A5:D3:21:D0:2D:48:"
      "7B:31:60:5C:5A:D8:0D:CD:78:78:6C:6D:CE:CC:0C:67\r\n"
    "m=audio 9000 RTP/AVP 0 8 126\r\n"
    "c=IN IP4 148.147.200.251\r\n"
    "b=TIAS:64000\r\n"
    "a=rtpmap:0 PCMU/8000\r\n"
    "a=rtpmap:8 PCMA/8000\r\n"
    "a=rtpmap:126 telephone-event/8000\r\n"
    "a=candidate:0 1 udp 2130706432 148.147.200.251 9000 typ host\r\n"
    "a=candidate:0 2 udp 2130706432 148.147.200.251 9005 typ host\r\n"
    "a=ice-ufrag:cYuakxkEKH+RApYE\r\n"
    "a=ice-pwd:bwtpzLZD+3jbu8vQHvEa6Xuq\r\n"
    "a=setup:active\r\n"
    "a=sendrecv\r\n";
+
+
// Bit flags for SDP checks performed by the tests.
enum sdpTestFlags
{
  HAS_ALL_CANDIDATES = (1 << 0),
};

// Bit flags describing which media types the offer and the answer include.
// Offer bits occupy the low byte, answer bits the second byte, so one value
// can describe both sides of a call.
enum offerAnswerFlags
{
  OFFER_NONE  = 0, // Sugar to make function calls clearer.
  OFFER_AUDIO = (1<<0),
  OFFER_VIDEO = (1<<1),
  // Leaving some room here for other media types
  ANSWER_NONE  = 0, // Sugar to make function calls clearer.
  ANSWER_AUDIO = (1<<8),
  ANSWER_VIDEO = (1<<9),

  OFFER_AV = OFFER_AUDIO | OFFER_VIDEO,
  ANSWER_AV = ANSWER_AUDIO | ANSWER_VIDEO
};

// Which side(s) of the call trickle their ICE candidates.
typedef enum {
  NO_TRICKLE = 0,
  OFFERER_TRICKLES = 1,
  ANSWERER_TRICKLES = 2,
  BOTH_TRICKLE = OFFERER_TRICKLES | ANSWERER_TRICKLES
} TrickleType;
+
// Test-side PeerConnection observer. Each callback records its result in
// member state (lastString, state, status codes) so tests can poll for
// completion with the *_WAIT macros and inspect what happened.
class TestObserver : public AFakePCObserver
{
protected:
  ~TestObserver() {}

public:
  TestObserver(PeerConnectionImpl *peerConnection,
               const std::string &aName) :
    AFakePCObserver(peerConnection, aName),
    lastAddIceStatusCode(PeerConnectionImpl::kNoError),
    peerAgent(nullptr),
    trickleCandidates(true)
  {}

  // Counts gathered candidate strings containing |cand| (substring match).
  size_t MatchingCandidates(const std::string& cand) {
    size_t count = 0;

    for (size_t i=0; i<candidates.size(); ++i) {
      if (candidates[i].find(cand) != std::string::npos)
        ++count;
    }

    return count;
  }

  NS_DECL_THREADSAFE_ISUPPORTS
  NS_IMETHOD OnCreateOfferSuccess(const char* offer, ER&) override;
  NS_IMETHOD OnCreateOfferError(uint32_t code, const char *msg, ER&) override;
  NS_IMETHOD OnCreateAnswerSuccess(const char* answer, ER&) override;
  NS_IMETHOD OnCreateAnswerError(uint32_t code, const char *msg, ER&) override;
  NS_IMETHOD OnSetLocalDescriptionSuccess(ER&) override;
  NS_IMETHOD OnSetRemoteDescriptionSuccess(ER&) override;
  NS_IMETHOD OnSetLocalDescriptionError(uint32_t code, const char *msg, ER&) override;
  NS_IMETHOD OnSetRemoteDescriptionError(uint32_t code, const char *msg, ER&) override;
  NS_IMETHOD NotifyDataChannel(nsIDOMDataChannel *channel, ER&) override;
  NS_IMETHOD OnStateChange(PCObserverStateType state_type, ER&, void*) override;
  NS_IMETHOD OnAddStream(DOMMediaStream &stream, ER&) override;
  NS_IMETHOD OnRemoveStream(DOMMediaStream &stream, ER&) override;
  NS_IMETHOD OnAddTrack(MediaStreamTrack &track, ER&) override;
  NS_IMETHOD OnRemoveTrack(MediaStreamTrack &track, ER&) override;
  NS_IMETHOD OnReplaceTrackSuccess(ER&) override;
  NS_IMETHOD OnReplaceTrackError(uint32_t code, const char *msg, ER&) override;
  NS_IMETHOD OnAddIceCandidateSuccess(ER&) override;
  NS_IMETHOD OnAddIceCandidateError(uint32_t code, const char *msg, ER&) override;
  NS_IMETHOD OnIceCandidate(uint16_t level, const char *mid, const char *cand, ER&) override;
  NS_IMETHOD OnNegotiationNeeded(ER&) override;

  // Hack because add_ice_candidates can happen asynchronously with respect
  // to the API calls. The whole test suite needs a refactor.
  ResponseState addIceCandidateState;
  PeerConnectionImpl::Error lastAddIceStatusCode;

  // Back-pointer to the remote SignalingAgent (set by tests; presumably
  // non-owning — it is a raw pointer initialized to nullptr).
  SignalingAgent* peerAgent;
  // When false, tests deliver candidates out-of-band instead of trickling.
  bool trickleCandidates;
};
+
// NOTE(review): the QI table lists only nsISupportsWeakReference — the PC
// presumably holds this observer weakly (see the bug 998092 workaround at
// the top of the file); confirm.
NS_IMPL_ISUPPORTS(TestObserver, nsISupportsWeakReference)

// Records the offer SDP and flags success so polling tests can proceed.
NS_IMETHODIMP
TestObserver::OnCreateOfferSuccess(const char* offer, ER&)
{
  lastString = offer;
  state = stateSuccess;
  std::cout << name << ": onCreateOfferSuccess = " << std::endl << indent(offer)
            << std::endl;
  return NS_OK;
}

// Records the failure code and flags error state.
NS_IMETHODIMP
TestObserver::OnCreateOfferError(uint32_t code, const char *message, ER&)
{
  lastStatusCode = static_cast<PeerConnectionImpl::Error>(code);
  state = stateError;
  std::cout << name << ": onCreateOfferError = " << code
            << " (" << message << ")" << std::endl;
  return NS_OK;
}

// Records the answer SDP and flags success.
NS_IMETHODIMP
TestObserver::OnCreateAnswerSuccess(const char* answer, ER&)
{
  lastString = answer;
  state = stateSuccess;
  std::cout << name << ": onCreateAnswerSuccess =" << std::endl
            << indent(answer) << std::endl;
  return NS_OK;
}

// Records the failure code and flags error state.
// (state is set after logging here, unlike the sibling callbacks; the
// ordering is inconsistent but has no observable effect.)
NS_IMETHODIMP
TestObserver::OnCreateAnswerError(uint32_t code, const char *message, ER&)
{
  lastStatusCode = static_cast<PeerConnectionImpl::Error>(code);
  std::cout << name << ": onCreateAnswerError = " << code
            << " (" << message << ")" << std::endl;
  state = stateError;
  return NS_OK;
}

NS_IMETHODIMP
TestObserver::OnSetLocalDescriptionSuccess(ER&)
{
  lastStatusCode = PeerConnectionImpl::kNoError;
  state = stateSuccess;
  std::cout << name << ": onSetLocalDescriptionSuccess" << std::endl;
  return NS_OK;
}

NS_IMETHODIMP
TestObserver::OnSetRemoteDescriptionSuccess(ER&)
{
  lastStatusCode = PeerConnectionImpl::kNoError;
  state = stateSuccess;
  std::cout << name << ": onSetRemoteDescriptionSuccess" << std::endl;
  return NS_OK;
}

NS_IMETHODIMP
TestObserver::OnSetLocalDescriptionError(uint32_t code, const char *message, ER&)
{
  lastStatusCode = static_cast<PeerConnectionImpl::Error>(code);
  state = stateError;
  std::cout << name << ": onSetLocalDescriptionError = " << code
            << " (" << message << ")" << std::endl;
  return NS_OK;
}

NS_IMETHODIMP
TestObserver::OnSetRemoteDescriptionError(uint32_t code, const char *message, ER&)
{
  lastStatusCode = static_cast<PeerConnectionImpl::Error>(code);
  state = stateError;
  std::cout << name << ": onSetRemoteDescriptionError = " << code
            << " (" << message << ")" << std::endl;
  return NS_OK;
}

// Log-only: data channels are not exercised by assertions in this file.
NS_IMETHODIMP
TestObserver::NotifyDataChannel(nsIDOMDataChannel *channel, ER&)
{
  std::cout << name << ": NotifyDataChannel" << std::endl;
  return NS_OK;
}
+
// Human-readable names for the PCImpl* state enums, indexed by enum value.
static const char* PCImplSignalingStateStrings[] = {
  "SignalingInvalid",
  "SignalingStable",
  "SignalingHaveLocalOffer",
  "SignalingHaveRemoteOffer",
  "SignalingHaveLocalPranswer",
  "SignalingHaveRemotePranswer",
  "SignalingClosed"
};

static const char* PCImplIceConnectionStateStrings[] = {
  "new",
  "checking",
  "connected",
  "completed",
  "failed",
  "disconnected",
  "closed"
};

static const char* PCImplIceGatheringStateStrings[] = {
  "new",
  "gathering",
  "complete"
};

// Guard the tables against enum growth. Only compiled in the standalone
// build, which includes the generated enum bindings directly (see the
// PeerConnectionImplEnumsBinding.cpp include above).
#ifdef SIGNALING_UNITTEST_STANDALONE
static_assert(ArrayLength(PCImplSignalingStateStrings) ==
              size_t(PCImplSignalingState::EndGuard_),
              "Table sizes must match");
static_assert(ArrayLength(PCImplIceConnectionStateStrings) ==
              size_t(PCImplIceConnectionState::EndGuard_),
              "Table sizes must match");
static_assert(ArrayLength(PCImplIceGatheringStateStrings) ==
              size_t(PCImplIceGatheringState::EndGuard_),
              "Table sizes must match");
#endif // SIGNALING_UNITTEST_STANDALONE
+
// Logs the new state for whichever state machine changed and remembers the
// last state type seen. State queries must run on the main thread.
NS_IMETHODIMP
TestObserver::OnStateChange(PCObserverStateType state_type, ER&, void*)
{
  nsresult rv;
  PCImplIceConnectionState gotice;
  PCImplIceGatheringState goticegathering;
  PCImplSignalingState gotsignaling;

  std::cout << name << ": ";

  switch (state_type)
  {
  case PCObserverStateType::IceConnectionState:
    MOZ_ASSERT(NS_IsMainThread());
    rv = pc->IceConnectionState(&gotice);
    NS_ENSURE_SUCCESS(rv, rv);
    std::cout << "ICE Connection State: "
              << PCImplIceConnectionStateStrings[int(gotice)]
              << std::endl;
    break;
  case PCObserverStateType::IceGatheringState:
    MOZ_ASSERT(NS_IsMainThread());
    rv = pc->IceGatheringState(&goticegathering);
    NS_ENSURE_SUCCESS(rv, rv);
    std::cout
      << "ICE Gathering State: "
      << PCImplIceGatheringStateStrings[int(goticegathering)]
      << std::endl;
    break;
  case PCObserverStateType::SdpState:
    // No queryable SDP state; log-only.
    std::cout << "SDP State: " << std::endl;
    // NS_ENSURE_SUCCESS(rv, rv);
    break;
  case PCObserverStateType::SignalingState:
    MOZ_ASSERT(NS_IsMainThread());
    rv = pc->SignalingState(&gotsignaling);
    NS_ENSURE_SUCCESS(rv, rv);
    std::cout << "Signaling State: "
              << PCImplSignalingStateStrings[int(gotsignaling)]
              << std::endl;
    break;
  default:
    // Unknown State
    MOZ_CRASH("Unknown state change type.");
    break;
  }

  lastStateType = state_type;
  return NS_OK;
}
+
+
// Records the incoming stream and starts its fake source pulling on the
// STS thread so media appears to flow.
NS_IMETHODIMP
TestObserver::OnAddStream(DOMMediaStream &stream, ER&)
{
  std::cout << name << ": OnAddStream called hints=" << stream.GetHintContents()
            << " thread=" << PR_GetCurrentThread() << std::endl ;

  onAddStreamCalled = true;

  streams.push_back(&stream);

  // We know that the media stream is secretly a Fake_SourceMediaStream,
  // so now we can start it pulling from us
  RefPtr<Fake_SourceMediaStream> fs =
    static_cast<Fake_SourceMediaStream *>(stream.GetStream());

  test_utils->sts_target()->Dispatch(
    WrapRunnable(fs, &Fake_SourceMediaStream::Start),
    NS_DISPATCH_NORMAL);

  return NS_OK;
}
+
// The following callbacks only flag success so tests can wait on |state|.
NS_IMETHODIMP
TestObserver::OnRemoveStream(DOMMediaStream &stream, ER&)
{
  state = stateSuccess;
  return NS_OK;
}

NS_IMETHODIMP
TestObserver::OnAddTrack(MediaStreamTrack &track, ER&)
{
  state = stateSuccess;
  return NS_OK;
}

NS_IMETHODIMP
TestObserver::OnRemoveTrack(MediaStreamTrack &track, ER&)
{
  state = stateSuccess;
  return NS_OK;
}

// ReplaceTrack outcomes are intentionally ignored by these tests.
NS_IMETHODIMP
TestObserver::OnReplaceTrackSuccess(ER&)
{
  return NS_OK;
}

NS_IMETHODIMP
TestObserver::OnReplaceTrackError(uint32_t code, const char *message, ER&)
{
  return NS_OK;
}

// AddIceCandidate results go into dedicated members (addIceCandidateState,
// lastAddIceStatusCode) because they can arrive asynchronously relative to
// the other API calls — see the comment on those members.
NS_IMETHODIMP
TestObserver::OnAddIceCandidateSuccess(ER&)
{
  lastAddIceStatusCode = PeerConnectionImpl::kNoError;
  addIceCandidateState = TestObserver::stateSuccess;
  std::cout << name << ": onAddIceCandidateSuccess" << std::endl;
  addIceSuccessCount++;
  return NS_OK;
}

NS_IMETHODIMP
TestObserver::OnAddIceCandidateError(uint32_t code, const char *message, ER&)
{
  lastAddIceStatusCode = static_cast<PeerConnectionImpl::Error>(code);
  addIceCandidateState = TestObserver::stateError;
  std::cout << name << ": onAddIceCandidateError = " << code
            << " (" << message << ")" << std::endl;
  return NS_OK;
}
+
// Minimal line-oriented SDP editor used by tests to mangle SDP strings.
// The SDP is stored as an ordered list of (key, value) pairs: the key is
// everything up to the first space (e.g. "m=audio", "a=rtpmap:0") and the
// value is the rest of the line *including* the trailing "\r\n".
class ParsedSDP {
 public:

  explicit ParsedSDP(const std::string &sdp)
  {
    Parse(sdp);
  }

  // Deletes up to |limit| lines whose key equals |objType|, in order.
  void DeleteLines(const std::string &objType,
                   uint32_t limit = UINT32_MAX)
  {
    for (auto it = sdp_lines_.begin(); it != sdp_lines_.end() && limit;) {
      auto temp = it;
      ++it;  // advance before erasing so |it| stays valid
      if (temp->first == objType) {
        sdp_lines_.erase(temp);
        --limit;
      }
    }
  }

  void DeleteLine(const std::string &objType)
  {
    DeleteLines(objType, 1);
  }

  // Replaces the index-th instance of objType in the SDP with
  // a new string.
  // If content is an empty string then the line will be removed
  void ReplaceLine(const std::string &objType,
                   const std::string &content,
                   size_t index = 0)
  {
    auto it = FindLine(objType, index);
    if(it != sdp_lines_.end()) {
      if (content.empty()) {
        sdp_lines_.erase(it);
      } else {
        (*it) = MakeKeyValue(content);
      }
    }
  }

  // Appends a full SDP line (expected to end in "\r\n") at the end.
  void AddLine(const std::string &content)
  {
    sdp_lines_.push_back(MakeKeyValue(content));
  }

  // Splits one SDP line into (key, value) at the first space.
  // NOTE(review): the no-space branch strips the last two characters, which
  // assumes the line ends in "\r\n"; lines shorter than two characters would
  // misbehave — confirm all call sites pass well-formed lines.
  static std::pair<std::string, std::string> MakeKeyValue(
      const std::string &content)
  {
    size_t whiteSpace = content.find(' ');
    std::string key;
    std::string value;
    if (whiteSpace == std::string::npos) {
      //this is the line with no extra contents
      //example, v=0, a=sendrecv
      key = content.substr(0, content.size() - 2);
      value = "\r\n"; // Checking code assumes this is here.
    } else {
      key = content.substr(0, whiteSpace);
      value = content.substr(whiteSpace+1);
    }
    return std::make_pair(key, value);
  }

  // Returns an iterator to the index-th line with key |objType|, or end().
  std::list<std::pair<std::string, std::string>>::iterator FindLine(
      const std::string& objType,
      size_t index = 0)
  {
    for (auto it = sdp_lines_.begin(); it != sdp_lines_.end(); ++it) {
      if (it->first == objType) {
        if (index == 0) {
          return it;
        }
        --index;
      }
    }
    return sdp_lines_.end();
  }

  // Inserts |content| immediately after the index-th line with key
  // |objType|; silently does nothing if no such line exists.
  void InsertLineAfter(const std::string &objType,
                       const std::string &content,
                       size_t index = 0)
  {
    auto it = FindLine(objType, index);
    if (it != sdp_lines_.end()) {
      sdp_lines_.insert(++it, MakeKeyValue(content));
    }
  }

  // Returns the values for all lines of the indicated type
  // Removes trailing "\r\n" from values.
  std::vector<std::string> GetLines(std::string objType) const
  {
    std::vector<std::string> values;
    for (auto it = sdp_lines_.begin(); it != sdp_lines_.end(); ++it) {
      if (it->first == objType) {
        std::string value = it->second;
        if (value.find("\r") != std::string::npos) {
          value = value.substr(0, value.find("\r"));
        } else {
          ADD_FAILURE() << "SDP line had no endline; this should never happen.";
        }
        values.push_back(value);
      }
    }
    return values;
  }

  //Parse SDP as std::string into map that looks like:
  // key: sdp content till first space
  // value: sdp content after the first space, _including_ \r\n
  void Parse(const std::string &sdp)
  {
    size_t prev = 0;
    size_t found = 0;
    for(;;) {
      found = sdp.find('\n', found + 1);
      if (found == std::string::npos)
        break;
      // Each stored line keeps its '\n' terminator.
      std::string line = sdp.substr(prev, (found - prev) + 1);
      sdp_lines_.push_back(MakeKeyValue(line));

      prev = found + 1;
    }
  }

  //Convert Internal SDP representation into String representation
  std::string getSdp() const
  {
    std::string sdp;

    for (auto it = sdp_lines_.begin(); it != sdp_lines_.end(); ++it) {
      sdp += it->first;
      // Keyword-only lines store "\r\n" as their value; re-inserting the
      // separating space there would corrupt them.
      if (it->second != "\r\n") {
        sdp += " ";
      }
      sdp += it->second;
    }

    return sdp;
  }

  // Adds |candidate| as an a= line to the media section at 1-based |level|.
  void IncorporateCandidate(uint16_t level, const std::string &candidate)
  {
    std::string candidate_attribute("a=" + candidate + "\r\n");
    // InsertLineAfter is 0 indexed, but level is 1 indexed
    // This assumes that we have only media-level c lines.
    InsertLineAfter("c=IN", candidate_attribute, level - 1);
  }

  // Ordered (key, value) pairs for every line of the SDP.
  std::list<std::pair<std::string, std::string>> sdp_lines_;
};
+
+
// This class wraps the PeerConnection object and ensures that all calls
// into it happen on the main thread.
// Pattern: each method checks NS_IsMainThread(); off-main-thread callers
// re-dispatch *the same wrapper method* synchronously to the main thread
// (see the comment in Initialize for why not the PC method directly).
class PCDispatchWrapper : public nsSupportsWeakReference
{
 protected:
  virtual ~PCDispatchWrapper() {}

 public:
  explicit PCDispatchWrapper(const RefPtr<PeerConnectionImpl>& peerConnection)
    : pc_(peerConnection) {}

  NS_DECL_THREADSAFE_ISUPPORTS

  PeerConnectionImpl *pcImpl() const {
    return pc_;
  }

  const RefPtr<PeerConnectionMedia>& media() const {
    return pc_->media();
  }

  // Initializes the wrapped PC and retains the observer.
  NS_IMETHODIMP Initialize(TestObserver* aObserver,
                           nsGlobalWindow* aWindow,
                           const PeerConnectionConfiguration& aConfiguration,
                           nsIThread* aThread) {
    nsresult rv;

    observer_ = aObserver;

    if (NS_IsMainThread()) {
      rv = pc_->Initialize(*aObserver, aWindow, aConfiguration, aThread);
    } else {
      // It would have been preferable here to dispatch directly to
      // PeerConnectionImpl::Initialize but since all the PC methods
      // have overrides clang will throw a 'couldn't infer template
      // argument' error.
      // Instead we are dispatching back to the same method for
      // all of these.
      gMainThread->Dispatch(
        WrapRunnableRet(&rv, this, &PCDispatchWrapper::Initialize,
          aObserver, aWindow, aConfiguration, aThread),
        NS_DISPATCH_SYNC);
      // NOTE(review): this overwrites the rv stored by the synchronous
      // dispatch above, masking Initialize failures on the dispatch path —
      // confirm whether that is intentional.
      rv = NS_OK;
    }

    return rv;
  }

  // Creates an offer and, on the main-thread path only, also asserts the
  // observer reported success before returning.
  NS_IMETHODIMP CreateOffer(const mozilla::JsepOfferOptions& aOptions) {
    nsresult rv;

    if (NS_IsMainThread()) {
      rv = pc_->CreateOffer(aOptions);
      EXPECT_EQ(NS_OK, rv);
      if (NS_FAILED(rv))
        return rv;
      EXPECT_EQ(TestObserver::stateSuccess, observer_->state);
      if (observer_->state != TestObserver::stateSuccess) {
        return NS_ERROR_FAILURE;
      }
    } else {
      gMainThread->Dispatch(
        WrapRunnableRet(&rv, this, &PCDispatchWrapper::CreateOffer, aOptions),
        NS_DISPATCH_SYNC);
    }

    return rv;
  }

  NS_IMETHODIMP CreateAnswer() {
    nsresult rv;

    if (NS_IsMainThread()) {
      rv = pc_->CreateAnswer();
    } else {
      gMainThread->Dispatch(
        WrapRunnableRet(&rv, this, &PCDispatchWrapper::CreateAnswer),
        NS_DISPATCH_SYNC);
    }

    return rv;
  }

  NS_IMETHODIMP SetLocalDescription (int32_t aAction, const char* aSDP) {
    nsresult rv;

    if (NS_IsMainThread()) {
      rv = pc_->SetLocalDescription(aAction, aSDP);
    } else {
      gMainThread->Dispatch(
        WrapRunnableRet(&rv, this, &PCDispatchWrapper::SetLocalDescription,
          aAction, aSDP),
        NS_DISPATCH_SYNC);
    }

    return rv;
  }

  NS_IMETHODIMP SetRemoteDescription (int32_t aAction, const char* aSDP) {
    nsresult rv;

    if (NS_IsMainThread()) {
      rv = pc_->SetRemoteDescription(aAction, aSDP);
    } else {
      gMainThread->Dispatch(
        WrapRunnableRet(&rv, this, &PCDispatchWrapper::SetRemoteDescription,
          aAction, aSDP),
        NS_DISPATCH_SYNC);
    }

    return rv;
  }

  NS_IMETHODIMP AddIceCandidate(const char* aCandidate, const char* aMid,
                                unsigned short aLevel) {
    nsresult rv;

    if (NS_IsMainThread()) {
      rv = pc_->AddIceCandidate(aCandidate, aMid, aLevel);
    } else {
      gMainThread->Dispatch(
        WrapRunnableRet(&rv, this, &PCDispatchWrapper::AddIceCandidate,
          aCandidate, aMid, aLevel),
        NS_DISPATCH_SYNC);
    }
    return rv;
  }

  NS_IMETHODIMP AddTrack(MediaStreamTrack *aTrack,
                         DOMMediaStream *aMediaStream)
  {
    nsresult rv;

    if (NS_IsMainThread()) {
      rv = pc_->AddTrack(*aTrack, *aMediaStream);
    } else {
      gMainThread->Dispatch(
        WrapRunnableRet(&rv, this, &PCDispatchWrapper::AddTrack, aTrack,
          aMediaStream),
        NS_DISPATCH_SYNC);
    }

    return rv;
  }

  NS_IMETHODIMP RemoveTrack(MediaStreamTrack *aTrack) {
    nsresult rv;

    if (NS_IsMainThread()) {
      rv = pc_->RemoveTrack(*aTrack);
    } else {
      gMainThread->Dispatch(
        WrapRunnableRet(&rv, this, &PCDispatchWrapper::RemoveTrack, aTrack),
        NS_DISPATCH_SYNC);
    }

    return rv;
  }

  // Caller owns the returned *aSDP buffer (PC API contract — confirm).
  NS_IMETHODIMP GetLocalDescription(char** aSDP) {
    nsresult rv;

    if (NS_IsMainThread()) {
      rv = pc_->GetLocalDescription(aSDP);
    } else {
      gMainThread->Dispatch(
        WrapRunnableRet(&rv, this, &PCDispatchWrapper::GetLocalDescription,
          aSDP),
        NS_DISPATCH_SYNC);
    }

    return rv;
  }

  NS_IMETHODIMP GetRemoteDescription(char** aSDP) {
    nsresult rv;

    if (NS_IsMainThread()) {
      rv = pc_->GetRemoteDescription(aSDP);
    } else {
      gMainThread->Dispatch(
        WrapRunnableRet(&rv, this, &PCDispatchWrapper::GetRemoteDescription,
          aSDP),
        NS_DISPATCH_SYNC);
    }

    return rv;
  }

  mozilla::dom::PCImplSignalingState SignalingState() {
    mozilla::dom::PCImplSignalingState result;

    if (NS_IsMainThread()) {
      result = pc_->SignalingState();
    } else {
      gMainThread->Dispatch(
        WrapRunnableRet(&result, this, &PCDispatchWrapper::SignalingState),
        NS_DISPATCH_SYNC);
    }

    return result;
  }

  mozilla::dom::PCImplIceConnectionState IceConnectionState() {
    mozilla::dom::PCImplIceConnectionState result;

    if (NS_IsMainThread()) {
      result = pc_->IceConnectionState();
    } else {
      gMainThread->Dispatch(
        WrapRunnableRet(&result, this, &PCDispatchWrapper::IceConnectionState),
        NS_DISPATCH_SYNC);
    }

    return result;
  }

  mozilla::dom::PCImplIceGatheringState IceGatheringState() {
    mozilla::dom::PCImplIceGatheringState result;

    if (NS_IsMainThread()) {
      result = pc_->IceGatheringState();
    } else {
      gMainThread->Dispatch(
        WrapRunnableRet(&result, this, &PCDispatchWrapper::IceGatheringState),
        NS_DISPATCH_SYNC);
    }

    return result;
  }

  NS_IMETHODIMP Close() {
    nsresult rv;

    if (NS_IsMainThread()) {
      rv = pc_->Close();
    } else {
      gMainThread->Dispatch(
        WrapRunnableRet(&rv, this, &PCDispatchWrapper::Close),
        NS_DISPATCH_SYNC);
    }

    return rv;
  }

 private:
  RefPtr<PeerConnectionImpl> pc_;      // the wrapped PeerConnection
  RefPtr<TestObserver> observer_;      // retained so CreateOffer can check it
};

NS_IMPL_ISUPPORTS(PCDispatchWrapper, nsISupportsWeakReference)
+
+
// (streamId, trackId) pair with lexicographic ordering, so it can serve as
// a key in ordered associative containers.
struct Msid
{
  std::string streamId;
  std::string trackId;
  // Strict weak ordering: compare streamId first, then trackId.
  bool operator<(const Msid& other) const {
    if (streamId != other.streamId) {
      return streamId < other.streamId;
    }
    return trackId < other.trackId;
  }
};
+
+class SignalingAgent {
+ public:
+ // Builds one signaling endpoint: configures STUN (UDP and TCP against the
+ // same server), creates the PeerConnectionImpl, and wraps it so calls are
+ // marshalled to the main thread. Call Init() before use.
+ explicit SignalingAgent(const std::string &aName,
+ const std::string stun_addr = g_stun_server_address,
+ uint16_t stun_port = g_stun_server_port) :
+ pc(nullptr),
+ name(aName),
+ mBundleEnabled(true),
+ mExpectedFrameRequestType(VideoSessionConduit::FrameRequestPli),
+ mExpectNack(true),
+ mExpectRtcpMuxAudio(true),
+ mExpectRtcpMuxVideo(true),
+ mRemoteDescriptionSet(false) {
+ cfg_.addStunServer(stun_addr, stun_port, kNrIceTransportUdp);
+ cfg_.addStunServer(stun_addr, stun_port, kNrIceTransportTcp);
+
+ PeerConnectionImpl *pcImpl =
+ PeerConnectionImpl::CreatePeerConnection();
+ EXPECT_TRUE(pcImpl);
+ // Loopback/link-local are normally filtered; tests need them to connect
+ // two agents on the same host.
+ pcImpl->SetAllowIceLoopback(true);
+ pcImpl->SetAllowIceLinkLocal(true);
+ pc = new PCDispatchWrapper(pcImpl);
+ }
+
+
+ ~SignalingAgent() {
+ mozilla::SyncRunnable::DispatchToThread(gMainThread,
+ WrapRunnable(this, &SignalingAgent::Close));
+ }
+
+ void Init_m()
+ {
+ pObserver = new TestObserver(pc->pcImpl(), name);
+ ASSERT_TRUE(pObserver);
+
+ ASSERT_EQ(pc->Initialize(pObserver, nullptr, cfg_, gMainThread), NS_OK);
+ }
+
+ void Init()
+ {
+ mozilla::SyncRunnable::DispatchToThread(gMainThread,
+ WrapRunnable(this, &SignalingAgent::Init_m));
+ }
+
+ void SetBundleEnabled(bool enabled)
+ {
+ mBundleEnabled = enabled;
+ }
+
+ void SetBundlePolicy(JsepBundlePolicy policy)
+ {
+ cfg_.setBundlePolicy(policy);
+ }
+
+ void SetExpectedFrameRequestType(VideoSessionConduit::FrameRequestType type)
+ {
+ mExpectedFrameRequestType = type;
+ }
+
+ // Blocks (via gtest WAIT) until ICE gathering completes, then sanity-checks
+ // that the local SDP's default candidates have been filled in.
+ void WaitForGather() {
+ ASSERT_TRUE_WAIT(ice_gathering_state() == PCImplIceGatheringState::Complete,
+ kDefaultTimeout);
+
+ std::cout << name << ": Init Complete" << std::endl;
+
+ // Check that the default candidate has been filled out with something
+ std::string localSdp = getLocalDescription();
+
+ std::cout << "Local SDP after gather: " << localSdp;
+ // Placeholder address/port 9 must have been replaced by real candidates.
+ ASSERT_EQ(std::string::npos, localSdp.find("c=IN IP4 0.0.0.0"));
+ ASSERT_EQ(std::string::npos, localSdp.find("m=video 9 "));
+ ASSERT_EQ(std::string::npos, localSdp.find("m=audio 9 "));
+
+ // TODO(bug 1098584): Check for end-of-candidates attr
+ }
+
+ // Like WaitForGather(), but tolerates ICE failure: returns false if the
+ // connection reaches Failed before gathering completes, true otherwise.
+ bool WaitForGatherAllowFail() {
+ EXPECT_TRUE_WAIT(
+ ice_gathering_state() == PCImplIceGatheringState::Complete ||
+ ice_connection_state() == PCImplIceConnectionState::Failed,
+ kDefaultTimeout);
+
+ if (ice_connection_state() == PCImplIceConnectionState::Failed) {
+ std::cout << name << ": Init Failed" << std::endl;
+ return false;
+ }
+
+ // Fixed: restore the ": " separator used by every other log line here
+ // (was "nameInit Complete").
+ std::cout << name << ": Init Complete" << std::endl;
+ return true;
+ }
+
+ void DropOutgoingTrickleCandidates() {
+ pObserver->trickleCandidates = false;
+ }
+
+ PCImplIceConnectionState ice_connection_state()
+ {
+ return pc->IceConnectionState();
+ }
+
+ PCImplIceGatheringState ice_gathering_state()
+ {
+ return pc->IceGatheringState();
+ }
+
+ PCImplSignalingState signaling_state()
+ {
+ return pc->SignalingState();
+ }
+
+ void Close()
+ {
+ std::cout << name << ": Close" << std::endl;
+
+ pc->Close();
+ pc = nullptr;
+ pObserver = nullptr;
+ }
+
+ bool OfferContains(const std::string& str) {
+ return offer().find(str) != std::string::npos;
+ }
+
+ bool AnswerContains(const std::string& str) {
+ return answer().find(str) != std::string::npos;
+ }
+
+ size_t MatchingCandidates(const std::string& cand) {
+ return pObserver->MatchingCandidates(cand);
+ }
+
+ const std::string& offer() const { return offer_; }
+ const std::string& answer() const { return answer_; }
+
+ // Fetches the current local SDP as a std::string; empty if none is set.
+ std::string getLocalDescription() const {
+ char *sdp = nullptr;
+ pc->GetLocalDescription(&sdp);
+ if (!sdp) {
+ // No local description has been negotiated yet.
+ return "";
+ }
+ std::string result(sdp);
+ // NOTE(review): scalar delete on a char* out-param; if the wrapper
+ // allocates with new[] this should be delete[] -- confirm the allocator.
+ delete sdp;
+ return result;
+ }
+
+ // Fetches the current remote SDP as a std::string; empty if none is set.
+ std::string getRemoteDescription() const {
+ // nullptr instead of 0, consistent with getLocalDescription().
+ char *sdp = nullptr;
+ pc->GetRemoteDescription(&sdp);
+ if (!sdp) {
+ return "";
+ }
+ std::string result(sdp);
+ // NOTE(review): scalar delete on a char* out-param; if the wrapper
+ // allocates with new[] this should be delete[] -- confirm the allocator.
+ delete sdp;
+ return result;
+ }
+
+ std::string RemoveBundle(const std::string& sdp) const {
+ ParsedSDP parsed(sdp);
+ parsed.DeleteLines("a=group:BUNDLE");
+ return parsed.getSdp();
+ }
+
+ // Adds a stream to the PeerConnection.
+ void AddStream(uint32_t hint =
+ DOMMediaStream::HINT_CONTENTS_AUDIO |
+ DOMMediaStream::HINT_CONTENTS_VIDEO,
+ MediaStream *stream = nullptr) {
+
+ if (!stream && (hint & DOMMediaStream::HINT_CONTENTS_AUDIO)) {
+ // Useful default
+ // Create a media stream as if it came from GUM
+ Fake_AudioStreamSource *audio_stream =
+ new Fake_AudioStreamSource();
+
+ nsresult ret;
+ mozilla::SyncRunnable::DispatchToThread(
+ test_utils->sts_target(),
+ WrapRunnableRet(&ret, audio_stream, &Fake_MediaStream::Start));
+
+ ASSERT_TRUE(NS_SUCCEEDED(ret));
+ stream = audio_stream;
+ }
+
+ RefPtr<DOMMediaStream> domMediaStream = new DOMMediaStream(stream);
+ domMediaStream->SetHintContents(hint);
+
+ nsTArray<RefPtr<MediaStreamTrack>> tracks;
+ domMediaStream->GetTracks(tracks);
+ for (uint32_t i = 0; i < tracks.Length(); i++) {
+ Msid msid = {domMediaStream->GetId(), tracks[i]->GetId()};
+
+ ASSERT_FALSE(mAddedTracks.count(msid))
+ << msid.streamId << "/" << msid.trackId << " already added";
+
+ mAddedTracks[msid] = (tracks[i]->AsVideoStreamTrack() ?
+ SdpMediaSection::kVideo :
+ SdpMediaSection::kAudio);
+
+ ASSERT_EQ(pc->AddTrack(tracks[i], domMediaStream), NS_OK);
+ }
+ domMediaStreams_.push_back(domMediaStream);
+ }
+
+ // Renders a track map as "streamId/trackId (type)" lines for diagnostics.
+ // I would love to make this an overload of operator<<, but there's no way to
+ // declare it in a way that works with gtest's header files.
+ std::string DumpTracks(
+ const std::map<Msid, SdpMediaSection::MediaType>& tracks) const
+ {
+ std::ostringstream oss;
+ for (const auto& entry : tracks) {
+ oss << entry.first.streamId << "/" << entry.first.trackId
+ << " (" << entry.second << ")" << std::endl;
+ }
+
+ return oss.str();
+ }
+
+ // Forgets every added track of |type| so later pipeline/stream checks do
+ // not expect to find them (used after removing tracks of that type).
+ void ExpectMissingTracks(SdpMediaSection::MediaType type)
+ {
+ for (auto it = mAddedTracks.begin(); it != mAddedTracks.end();) {
+ if (it->second == type) {
+ // map::erase returns the next valid iterator, keeping this loop safe.
+ it = mAddedTracks.erase(it);
+ } else {
+ ++it;
+ }
+ }
+ }
+
+ void CheckLocalPipeline(const std::string& streamId,
+ const std::string& trackId,
+ SdpMediaSection::MediaType type,
+ int pipelineCheckFlags = 0) const
+ {
+ LocalSourceStreamInfo* info;
+ mozilla::SyncRunnable::DispatchToThread(
+ gMainThread, WrapRunnableRet(&info,
+ pc->media(), &PeerConnectionMedia::GetLocalStreamById,
+ streamId));
+
+ ASSERT_TRUE(info) << "No such local stream id: " << streamId;
+
+ RefPtr<MediaPipeline> pipeline;
+
+ mozilla::SyncRunnable::DispatchToThread(
+ gMainThread,
+ WrapRunnableRet(&pipeline, info,
+ &SourceStreamInfo::GetPipelineByTrackId_m,
+ trackId));
+
+ ASSERT_TRUE(pipeline) << "No such local track id: " << trackId;
+
+ if (type == SdpMediaSection::kVideo) {
+ ASSERT_TRUE(pipeline->IsVideo()) << "Local track " << trackId
+ << " was not video";
+ ASSERT_EQ(mExpectRtcpMuxVideo, pipeline->IsDoingRtcpMux())
+ << "Pipeline for remote track " << trackId
+ << " is" << (mExpectRtcpMuxVideo ? " not " : " ") << "using rtcp-mux";
+ // No checking for video RTP yet, since we don't have support for fake
+ // video here yet. (bug 1142320)
+ } else {
+ ASSERT_FALSE(pipeline->IsVideo()) << "Local track " << trackId
+ << " was not audio";
+ WAIT(pipeline->rtp_packets_sent() >= 4 &&
+ pipeline->rtcp_packets_received() >= 1,
+ kDefaultTimeout);
+ ASSERT_LE(4, pipeline->rtp_packets_sent())
+ << "Local track " << trackId << " isn't sending RTP";
+ ASSERT_LE(1, pipeline->rtcp_packets_received())
+ << "Local track " << trackId << " isn't receiving RTCP";
+ ASSERT_EQ(mExpectRtcpMuxAudio, pipeline->IsDoingRtcpMux())
+ << "Pipeline for remote track " << trackId
+ << " is" << (mExpectRtcpMuxAudio ? " not " : " ") << "using rtcp-mux";
+ }
+ }
+
+ void CheckRemotePipeline(const std::string& streamId,
+ const std::string& trackId,
+ SdpMediaSection::MediaType type,
+ int pipelineCheckFlags = 0) const
+ {
+ RemoteSourceStreamInfo* info;
+ mozilla::SyncRunnable::DispatchToThread(
+ gMainThread, WrapRunnableRet(&info,
+ pc->media(), &PeerConnectionMedia::GetRemoteStreamById,
+ streamId));
+
+ ASSERT_TRUE(info) << "No such remote stream id: " << streamId;
+
+ RefPtr<MediaPipeline> pipeline;
+
+ mozilla::SyncRunnable::DispatchToThread(
+ gMainThread,
+ WrapRunnableRet(&pipeline, info,
+ &SourceStreamInfo::GetPipelineByTrackId_m,
+ trackId));
+
+ ASSERT_TRUE(pipeline) << "No such remote track id: " << trackId;
+
+ if (type == SdpMediaSection::kVideo) {
+ ASSERT_TRUE(pipeline->IsVideo()) << "Remote track " << trackId
+ << " was not video";
+ mozilla::MediaSessionConduit *conduit = pipeline->Conduit();
+ ASSERT_TRUE(conduit);
+ ASSERT_EQ(conduit->type(), mozilla::MediaSessionConduit::VIDEO);
+ mozilla::VideoSessionConduit *video_conduit =
+ static_cast<mozilla::VideoSessionConduit*>(conduit);
+ ASSERT_EQ(mExpectNack, video_conduit->UsingNackBasic());
+ ASSERT_EQ(mExpectedFrameRequestType,
+ video_conduit->FrameRequestMethod());
+ ASSERT_EQ(mExpectRtcpMuxVideo, pipeline->IsDoingRtcpMux())
+ << "Pipeline for remote track " << trackId
+ << " is" << (mExpectRtcpMuxVideo ? " not " : " ") << "using rtcp-mux";
+ // No checking for video RTP yet, since we don't have support for fake
+ // video here yet. (bug 1142320)
+ } else {
+ ASSERT_FALSE(pipeline->IsVideo()) << "Remote track " << trackId
+ << " was not audio";
+ WAIT(pipeline->rtp_packets_received() >= 4 &&
+ pipeline->rtcp_packets_sent() >= 1,
+ kDefaultTimeout);
+ ASSERT_LE(4, pipeline->rtp_packets_received())
+ << "Remote track " << trackId << " isn't receiving RTP";
+ ASSERT_LE(1, pipeline->rtcp_packets_sent())
+ << "Remote track " << trackId << " isn't sending RTCP";
+ ASSERT_EQ(mExpectRtcpMuxAudio, pipeline->IsDoingRtcpMux())
+ << "Pipeline for remote track " << trackId
+ << " is" << (mExpectRtcpMuxAudio ? " not " : " ") << "using rtcp-mux";
+ }
+ }
+
+ void RemoveTrack(size_t streamIndex, bool videoTrack = false)
+ {
+ ASSERT_LT(streamIndex, domMediaStreams_.size());
+ nsTArray<RefPtr<MediaStreamTrack>> tracks;
+ domMediaStreams_[streamIndex]->GetTracks(tracks);
+ for (size_t i = 0; i < tracks.Length(); ++i) {
+ if (!!tracks[i]->AsVideoStreamTrack() == videoTrack) {
+ Msid msid;
+ msid.streamId = domMediaStreams_[streamIndex]->GetId();
+ msid.trackId = tracks[i]->GetId();
+ mAddedTracks.erase(msid);
+ ASSERT_EQ(pc->RemoveTrack(tracks[i]), NS_OK);
+ }
+ }
+ }
+
+ void RemoveStream(size_t index) {
+ nsTArray<RefPtr<MediaStreamTrack>> tracks;
+ domMediaStreams_[index]->GetTracks(tracks);
+ for (uint32_t i = 0; i < tracks.Length(); i++) {
+ ASSERT_EQ(pc->RemoveTrack(tracks[i]), NS_OK);
+ }
+ domMediaStreams_.erase(domMediaStreams_.begin() + index);
+ }
+
+ // Removes the stream that was most recently added to the PeerConnection.
+ void RemoveLastStreamAdded() {
+ ASSERT_FALSE(domMediaStreams_.empty());
+ RemoveStream(domMediaStreams_.size() - 1);
+ }
+
+ // Adds a stream matching |offerFlags| (OFFER_AUDIO/OFFER_VIDEO), then
+ // drives CreateOffer the way JS would and stores the result in offer_.
+ // Asserts the observer reports success and the signaling state reaches
+ // |endState|.
+ void CreateOffer(OfferOptions& options,
+ uint32_t offerFlags,
+ PCImplSignalingState endState =
+ PCImplSignalingState::SignalingStable) {
+
+ uint32_t aHintContents = 0;
+ if (offerFlags & OFFER_AUDIO) {
+ aHintContents |= DOMMediaStream::HINT_CONTENTS_AUDIO;
+ }
+ if (offerFlags & OFFER_VIDEO) {
+ aHintContents |= DOMMediaStream::HINT_CONTENTS_VIDEO;
+ }
+ AddStream(aHintContents);
+
+ // Now call CreateOffer as JS would
+ pObserver->state = TestObserver::stateNoResponse;
+ ASSERT_EQ(pc->CreateOffer(options), NS_OK);
+
+ ASSERT_EQ(pObserver->state, TestObserver::stateSuccess);
+ ASSERT_EQ(signaling_state(), endState);
+ offer_ = pObserver->lastString;
+ if (!mBundleEnabled) {
+ // Simulate a non-BUNDLE endpoint by stripping a=group:BUNDLE.
+ offer_ = RemoveBundle(offer_);
+ }
+ }
+
+ // sets the offer to match the local description
+ // which isn't good if you are the answerer
+ void UpdateOffer() {
+ offer_ = getLocalDescription();
+ if (!mBundleEnabled) {
+ offer_ = RemoveBundle(offer_);
+ }
+ }
+
+ // Starts a fake GUM audio source, adds a stream matching
+ // |offerAnswerFlags|, then drives CreateAnswer as JS would and stores the
+ // result in answer_. Asserts success and the expected signaling state.
+ void CreateAnswer(uint32_t offerAnswerFlags,
+ PCImplSignalingState endState =
+ PCImplSignalingState::SignalingHaveRemoteOffer) {
+ // Create a media stream as if it came from GUM
+ Fake_AudioStreamSource *audio_stream =
+ new Fake_AudioStreamSource();
+
+ nsresult ret;
+ // The fake stream must be started on the STS thread.
+ mozilla::SyncRunnable::DispatchToThread(
+ test_utils->sts_target(),
+ WrapRunnableRet(&ret, audio_stream, &Fake_MediaStream::Start));
+
+ ASSERT_TRUE(NS_SUCCEEDED(ret));
+
+ uint32_t aHintContents = 0;
+ if (offerAnswerFlags & ANSWER_AUDIO) {
+ aHintContents |= DOMMediaStream::HINT_CONTENTS_AUDIO;
+ }
+ if (offerAnswerFlags & ANSWER_VIDEO) {
+ aHintContents |= DOMMediaStream::HINT_CONTENTS_VIDEO;
+ }
+ AddStream(aHintContents, audio_stream);
+
+ // Decide if streams are disabled for offer or answer
+ // then perform SDP checking based on which stream disabled
+ pObserver->state = TestObserver::stateNoResponse;
+ ASSERT_EQ(pc->CreateAnswer(), NS_OK);
+ ASSERT_EQ(pObserver->state, TestObserver::stateSuccess);
+ ASSERT_EQ(signaling_state(), endState);
+
+ answer_ = pObserver->lastString;
+ if (!mBundleEnabled) {
+ // Simulate a non-BUNDLE endpoint by stripping a=group:BUNDLE.
+ answer_ = RemoveBundle(answer_);
+ }
+ }
+
+ // sets the answer to match the local description
+ // which isn't good if you are the offerer
+ void UpdateAnswer() {
+ answer_ = getLocalDescription();
+ if (!mBundleEnabled) {
+ answer_ = RemoveBundle(answer_);
+ }
+ }
+
+ // Removes the first stream's audio or video track, then re-offers and
+ // stores the result in offer_ (renegotiation path).
+ void CreateOfferRemoveTrack(OfferOptions& options, bool videoTrack) {
+
+ RemoveTrack(0, videoTrack);
+
+ // Now call CreateOffer as JS would
+ pObserver->state = TestObserver::stateNoResponse;
+ ASSERT_EQ(pc->CreateOffer(options), NS_OK);
+ // ASSERT_EQ for a useful failure message, consistent with CreateOffer().
+ ASSERT_EQ(pObserver->state, TestObserver::stateSuccess);
+ offer_ = pObserver->lastString;
+ if (!mBundleEnabled) {
+ offer_ = RemoveBundle(offer_);
+ }
+ }
+
+ // Applies |remote| as the remote description (offer or answer). If
+ // |endState| is SignalingInvalid the expected state is derived from
+ // |action|. Afterwards flushes any candidates that arrived before the
+ // remote description was set.
+ void SetRemote(TestObserver::Action action, const std::string& remote,
+ bool ignoreError = false,
+ PCImplSignalingState endState =
+ PCImplSignalingState::SignalingInvalid) {
+
+ if (endState == PCImplSignalingState::SignalingInvalid) {
+ endState = (action == TestObserver::OFFER ?
+ PCImplSignalingState::SignalingHaveRemoteOffer :
+ PCImplSignalingState::SignalingStable);
+ }
+
+ pObserver->state = TestObserver::stateNoResponse;
+ ASSERT_EQ(pc->SetRemoteDescription(action, remote.c_str()), NS_OK);
+ ASSERT_EQ(signaling_state(), endState);
+ if (!ignoreError) {
+ ASSERT_EQ(pObserver->state, TestObserver::stateSuccess);
+ }
+
+ mRemoteDescriptionSet = true;
+ // Replay trickle candidates that were queued by AddIceCandidateStr().
+ for (auto i = deferredCandidates_.begin();
+ i != deferredCandidates_.end();
+ ++i) {
+ AddIceCandidate(i->candidate.c_str(),
+ i->mid.c_str(),
+ i->level,
+ i->expectSuccess);
+ }
+ deferredCandidates_.clear();
+ }
+
+ // Applies |local| as the local description (offer or answer). If
+ // |endState| is SignalingInvalid the expected state is derived from
+ // |action|. Asserts success unless |ignoreError| is set.
+ void SetLocal(TestObserver::Action action, const std::string& local,
+ bool ignoreError = false,
+ PCImplSignalingState endState =
+ PCImplSignalingState::SignalingInvalid) {
+
+ if (endState == PCImplSignalingState::SignalingInvalid) {
+ endState = (action == TestObserver::OFFER ?
+ PCImplSignalingState::SignalingHaveLocalOffer :
+ PCImplSignalingState::SignalingStable);
+ }
+
+ pObserver->state = TestObserver::stateNoResponse;
+ ASSERT_EQ(pc->SetLocalDescription(action, local.c_str()), NS_OK);
+ ASSERT_EQ(signaling_state(), endState);
+ if (!ignoreError) {
+ ASSERT_EQ(pObserver->state, TestObserver::stateSuccess);
+ }
+ }
+
+ typedef enum {
+ NORMAL_ENCODING,
+ CHROME_ENCODING
+ } TrickleEncoding;
+
+ bool IceCompleted() {
+ return pc->IceConnectionState() == PCImplIceConnectionState::Connected;
+ }
+
+ void AddIceCandidateStr(const std::string& candidate, const std::string& mid,
+ unsigned short level) {
+ if (!mRemoteDescriptionSet) {
+ // Not time to add this, because the unit-test code hasn't set the
+ // description yet.
+ DeferredCandidate candidateStruct = {candidate, mid, level, true};
+ deferredCandidates_.push_back(candidateStruct);
+ } else {
+ AddIceCandidate(candidate, mid, level, true);
+ }
+ }
+
+ // Feeds one ICE candidate to the PC and asserts the observer reported the
+ // expected success/error, and that the signaling state is unchanged.
+ void AddIceCandidate(const std::string& candidate, const std::string& mid, unsigned short level,
+ bool expectSuccess) {
+ PCImplSignalingState endState = signaling_state();
+ pObserver->addIceCandidateState = TestObserver::stateNoResponse;
+ pc->AddIceCandidate(candidate.c_str(), mid.c_str(), level);
+ // BUG FIX: the old ASSERT_TRUE(a == b ? x : y) parsed as (a == b) ? x : y
+ // because == binds tighter than ?:, and both enumerators are non-zero, so
+ // the assertion could never fail. Compare the state properly instead.
+ ASSERT_EQ(pObserver->addIceCandidateState,
+ expectSuccess ? TestObserver::stateSuccess :
+ TestObserver::stateError);
+
+ // Verify that adding ICE candidates does not change the signaling state
+ ASSERT_EQ(signaling_state(), endState);
+ ASSERT_NE("", mid);
+ }
+
+ int GetPacketsReceived(const std::string& streamId) const
+ {
+ std::vector<DOMMediaStream *> streams = pObserver->GetStreams();
+
+ for (size_t i = 0; i < streams.size(); ++i) {
+ if (streams[i]->GetId() == streamId) {
+ return GetPacketsReceived(i);
+ }
+ }
+
+ EXPECT_TRUE(false);
+ return 0;
+ }
+
+ int GetPacketsReceived(size_t stream) const {
+ std::vector<DOMMediaStream *> streams = pObserver->GetStreams();
+
+ if (streams.size() <= stream) {
+ EXPECT_TRUE(false);
+ return 0;
+ }
+
+ return streams[stream]->GetStream()->AsSourceStream()->GetSegmentsAdded();
+ }
+
+ int GetPacketsSent(const std::string& streamId) const
+ {
+ for (size_t i = 0; i < domMediaStreams_.size(); ++i) {
+ if (domMediaStreams_[i]->GetId() == streamId) {
+ return GetPacketsSent(i);
+ }
+ }
+
+ EXPECT_TRUE(false);
+ return 0;
+ }
+
+ int GetPacketsSent(size_t stream) const {
+ if (stream >= domMediaStreams_.size()) {
+ EXPECT_TRUE(false);
+ return 0;
+ }
+
+ return static_cast<Fake_MediaStreamBase *>(
+ domMediaStreams_[stream]->GetStream())->GetSegmentsAdded();
+ }
+
+ //Stops generating new audio data for transmission.
+ //Should be called before Cleanup of the peer connection.
+ void CloseSendStreams() {
+ for (auto i = domMediaStreams_.begin(); i != domMediaStreams_.end(); ++i) {
+ static_cast<Fake_MediaStream*>((*i)->GetStream())->StopStream();
+ }
+ }
+
+ //Stops pulling audio data off the receivers.
+ //Should be called before Cleanup of the peer connection.
+ void CloseReceiveStreams() {
+ std::vector<DOMMediaStream *> streams =
+ pObserver->GetStreams();
+ for (size_t i = 0; i < streams.size(); i++) {
+ streams[i]->GetStream()->AsSourceStream()->StopStream();
+ }
+ }
+
+ // Right now we have no convenient way for this unit-test to learn the track
+ // ids of the tracks, so they can be queried later. We could either expose
+ // the JsepSessionImpl in some way, or we could parse the identifiers out of
+ // the SDP. For now, we just specify audio/video, since a given DOMMediaStream
+ // can have only one of each anyway. Once this is fixed, we will need to
+ // pass a real track id if we want to test that case.
+ RefPtr<mozilla::MediaPipeline> GetMediaPipeline(
+ bool local, size_t stream, bool video) {
+ SourceStreamInfo* streamInfo;
+ if (local) {
+ mozilla::SyncRunnable::DispatchToThread(
+ gMainThread, WrapRunnableRet(&streamInfo,
+ pc->media(), &PeerConnectionMedia::GetLocalStreamByIndex,
+ stream));
+ } else {
+ mozilla::SyncRunnable::DispatchToThread(
+ gMainThread, WrapRunnableRet(&streamInfo,
+ pc->media(), &PeerConnectionMedia::GetRemoteStreamByIndex,
+ stream));
+ }
+
+ if (!streamInfo) {
+ return nullptr;
+ }
+
+ const auto &pipelines = streamInfo->GetPipelines();
+
+ for (auto i = pipelines.begin(); i != pipelines.end(); ++i) {
+ if (i->second->IsVideo() == video) {
+ std::cout << "Got MediaPipeline " << i->second->trackid();
+ return i->second;
+ }
+ }
+ return nullptr;
+ }
+
+ void SetPeer(SignalingAgent* peer) {
+ pObserver->peerAgent = peer;
+ }
+
+public:
+ RefPtr<PCDispatchWrapper> pc;
+ RefPtr<TestObserver> pObserver;
+ std::string offer_;
+ std::string answer_;
+ std::vector<RefPtr<DOMMediaStream>> domMediaStreams_;
+ PeerConnectionConfiguration cfg_;
+ const std::string name;
+ bool mBundleEnabled;
+ VideoSessionConduit::FrameRequestType mExpectedFrameRequestType;
+ bool mExpectNack;
+ bool mExpectRtcpMuxAudio;
+ bool mExpectRtcpMuxVideo;
+ bool mRemoteDescriptionSet;
+
+ std::map<Msid, SdpMediaSection::MediaType> mAddedTracks;
+
+ typedef struct {
+ std::string candidate;
+ std::string mid;
+ uint16_t level;
+ bool expectSuccess;
+ } DeferredCandidate;
+
+ std::list<DeferredCandidate> deferredCandidates_;
+};
+
+// Main-thread trampoline for trickle ICE: resolves the weak observer
+// reference (a no-op if the observer died), records the candidate, and
+// forwards it to the peer agent when trickling is enabled.
+static void AddIceCandidateToPeer(nsWeakPtr weak_observer,
+ uint16_t level,
+ const std::string &mid,
+ const std::string &cand) {
+ nsCOMPtr<nsISupportsWeakReference> tmp = do_QueryReferent(weak_observer);
+ if (!tmp) {
+ return;
+ }
+
+ RefPtr<nsSupportsWeakReference> tmp2 = do_QueryObject(tmp);
+ RefPtr<TestObserver> observer = static_cast<TestObserver*>(&*tmp2);
+
+ if (!observer) {
+ return;
+ }
+
+ observer->candidates.push_back(cand);
+
+ if (!observer->peerAgent || !observer->trickleCandidates) {
+ return;
+ }
+
+ observer->peerAgent->AddIceCandidateStr(cand, mid, level);
+}
+
+
+// Trickle-ICE callback: an empty candidate string marks end-of-gathering
+// and is ignored; real candidates are re-dispatched asynchronously (via a
+// weak self-reference, so a dying observer is safe) to unwind the stack
+// before forwarding to the peer.
+NS_IMETHODIMP
+TestObserver::OnIceCandidate(uint16_t level,
+ const char * mid,
+ const char * candidate, ER&)
+{
+ if (strlen(candidate) != 0) {
+ std::cerr << name << ": got candidate: " << candidate << std::endl;
+ // Forward back to myself to unwind stack.
+ nsWeakPtr weak_this = do_GetWeakReference(this);
+ gMainThread->Dispatch(
+ WrapRunnableNM(
+ &AddIceCandidateToPeer,
+ weak_this,
+ level,
+ std::string(mid),
+ std::string(candidate)),
+ NS_DISPATCH_NORMAL);
+ }
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+TestObserver::OnNegotiationNeeded(ER&)
+{
+ return NS_OK;
+}
+
+class SignalingEnvironment : public ::testing::Environment {
+ public:
+ void TearDown() {
+ // Signaling is shut down in XPCOM shutdown
+ }
+};
+
+// Fixture owning a set of single SignalingAgents (no peer wiring); agents
+// are stored as raw owning pointers and destroyed in TearDown.
+class SignalingAgentTest : public ::testing::Test {
+ public:
+ static void SetUpTestCase() {
+ }
+
+ void TearDown() {
+ // Delete all the agents. (gtest builds a fresh fixture per test, so
+ // the vector is not cleared here.)
+ for (size_t i=0; i < agents_.size(); i++) {
+ delete agents_[i];
+ }
+ }
+
+ bool CreateAgent() {
+ return CreateAgent(g_stun_server_address, g_stun_server_port);
+ }
+
+ // Creates and initializes one agent; ownership moves into agents_.
+ bool CreateAgent(const std::string stun_addr, uint16_t stun_port) {
+ UniquePtr<SignalingAgent> agent(
+ new SignalingAgent("agent", stun_addr, stun_port));
+
+ agent->Init();
+
+ agents_.push_back(agent.release());
+
+ return true;
+ }
+
+ // Like CreateAgent(), but leaves the agent uninitialized (for tests that
+ // exercise the pre-Init state).
+ void CreateAgentNoInit() {
+ UniquePtr<SignalingAgent> agent(new SignalingAgent("agent"));
+ agents_.push_back(agent.release());
+ }
+
+ SignalingAgent *agent(size_t i) {
+ return agents_[i];
+ }
+
+ private:
+ std::vector<SignalingAgent *> agents_;
+};
+
+
+class SignalingTest : public ::testing::Test,
+ public ::testing::WithParamInterface<std::string>
+{
+public:
+ SignalingTest()
+ : init_(false),
+ a1_(nullptr),
+ a2_(nullptr),
+ stun_addr_(g_stun_server_address),
+ stun_port_(g_stun_server_port) {}
+
+ // BUG FIX: this overload left init_ uninitialized, so EnsureInit() and
+ // ~SignalingTest() read an indeterminate value (UB; could skip teardown or
+ // run it on null agents). Initialize it like the default constructor does.
+ SignalingTest(const std::string& stun_addr, uint16_t stun_port)
+ : init_(false),
+ a1_(nullptr),
+ a2_(nullptr),
+ stun_addr_(stun_addr),
+ stun_port_(stun_port) {}
+
+ ~SignalingTest() {
+ if (init_) {
+ mozilla::SyncRunnable::DispatchToThread(gMainThread,
+ WrapRunnable(this, &SignalingTest::Teardown_m));
+ }
+ }
+
+ void Teardown_m() {
+ a1_->SetPeer(nullptr);
+ a2_->SetPeer(nullptr);
+ }
+
+ static void SetUpTestCase() {
+ }
+
+ // Lazily builds and cross-wires the two agents, applying the bundle
+ // configuration selected by the test parameter. Idempotent.
+ void EnsureInit() {
+
+ if (init_)
+ return;
+
+ a1_ = MakeUnique<SignalingAgent>(callerName, stun_addr_, stun_port_);
+ a2_ = MakeUnique<SignalingAgent>(calleeName, stun_addr_, stun_port_);
+
+ // The gtest parameter string picks the bundle scenario under test.
+ if (GetParam() == "no_bundle") {
+ a1_->SetBundleEnabled(false);
+ } else if(GetParam() == "reject_bundle") {
+ a2_->SetBundleEnabled(false);
+ } else if (GetParam() == "max-bundle") {
+ a1_->SetBundlePolicy(JsepBundlePolicy::kBundleMaxBundle);
+ a2_->SetBundlePolicy(JsepBundlePolicy::kBundleMaxBundle);
+ } else if (GetParam() == "balanced") {
+ a1_->SetBundlePolicy(JsepBundlePolicy::kBundleBalanced);
+ a2_->SetBundlePolicy(JsepBundlePolicy::kBundleBalanced);
+ } else if (GetParam() == "max-compat") {
+ a1_->SetBundlePolicy(JsepBundlePolicy::kBundleMaxCompat);
+ a2_->SetBundlePolicy(JsepBundlePolicy::kBundleMaxCompat);
+ }
+
+ a1_->Init();
+ a2_->Init();
+ // Each agent trickles its candidates to the other.
+ a1_->SetPeer(a2_.get());
+ a2_->SetPeer(a1_.get());
+
+ init_ = true;
+ }
+
+ bool UseBundle()
+ {
+ return (GetParam() != "no_bundle") && (GetParam() != "reject_bundle");
+ }
+
+ void WaitForGather() {
+ a1_->WaitForGather();
+ a2_->WaitForGather();
+ }
+
+ static void TearDownTestCase() {
+ }
+
+ void CreateOffer(OfferOptions& options, uint32_t offerFlags) {
+ EnsureInit();
+ a1_->CreateOffer(options, offerFlags);
+ }
+
+ void CreateSetOffer(OfferOptions& options) {
+ EnsureInit();
+ a1_->CreateOffer(options, OFFER_AV);
+ a1_->SetLocal(TestObserver::OFFER, a1_->offer());
+ }
+
+ // Home for checks that we cannot perform by inspecting the various signaling
+ // classes. We should endeavor to make this function disappear, since SDP
+ // checking does not belong in these tests. That's the job of
+ // jsep_session_unittest.
+ void SDPSanityCheck(const std::string& sdp, uint32_t flags, bool offer)
+ {
+ std::cout << "SDPSanityCheck flags for "
+ << (offer ? "offer" : "answer")
+ << " = " << std::hex << std::showbase
+ << flags << std::dec
+ << ((flags & HAS_ALL_CANDIDATES)?" HAS_ALL_CANDIDATES":"")
+ << std::endl;
+
+ if (flags & HAS_ALL_CANDIDATES) {
+ ASSERT_NE(std::string::npos, sdp.find("a=candidate"))
+ << "should have at least one candidate";
+ ASSERT_NE(std::string::npos, sdp.find("a=end-of-candidates"));
+ ASSERT_EQ(std::string::npos, sdp.find("c=IN IP4 0.0.0.0"));
+ }
+ }
+
+ void CheckPipelines()
+ {
+ std::cout << "Checking pipelines..." << std::endl;
+ for (auto it = a1_->mAddedTracks.begin();
+ it != a1_->mAddedTracks.end();
+ ++it) {
+ a1_->CheckLocalPipeline(it->first.streamId, it->first.trackId, it->second);
+ a2_->CheckRemotePipeline(it->first.streamId, it->first.trackId, it->second);
+ }
+
+ for (auto it = a2_->mAddedTracks.begin();
+ it != a2_->mAddedTracks.end();
+ ++it) {
+ a2_->CheckLocalPipeline(it->first.streamId, it->first.trackId, it->second);
+ a1_->CheckRemotePipeline(it->first.streamId, it->first.trackId, it->second);
+ }
+ std::cout << "Done checking pipelines." << std::endl;
+ }
+
+ void CheckStreams(SignalingAgent& sender, SignalingAgent& receiver)
+ {
+ for (auto it = sender.mAddedTracks.begin();
+ it != sender.mAddedTracks.end();
+ ++it) {
+ // No checking for video yet, since we don't have support for fake video
+ // here yet. (bug 1142320)
+ if (it->second == SdpMediaSection::kAudio) {
+ int sendExpect = sender.GetPacketsSent(it->first.streamId) + 2;
+ int receiveExpect = receiver.GetPacketsReceived(it->first.streamId) + 2;
+
+ // TODO: Once we support more than one of each track type per stream,
+ // this will need to be updated.
+ WAIT(sender.GetPacketsSent(it->first.streamId) >= sendExpect &&
+ receiver.GetPacketsReceived(it->first.streamId) >= receiveExpect,
+ kDefaultTimeout);
+ ASSERT_LE(sendExpect, sender.GetPacketsSent(it->first.streamId))
+ << "Local track " << it->first.streamId << "/" << it->first.trackId
+ << " is not sending audio segments.";
+ ASSERT_LE(receiveExpect, receiver.GetPacketsReceived(it->first.streamId))
+ << "Remote track " << it->first.streamId << "/" << it->first.trackId
+ << " is not receiving audio segments.";
+ }
+ }
+ }
+
+ void CheckStreams()
+ {
+ std::cout << "Checking streams..." << std::endl;
+ CheckStreams(*a1_, *a2_);
+ CheckStreams(*a2_, *a1_);
+ std::cout << "Done checking streams." << std::endl;
+ }
+
+ // Runs the offer half of a negotiation: a1 creates and sets its offer;
+ // when the offerer is not trickling, gathering is awaited and the offer is
+ // refreshed so it carries all candidates before a2 receives it.
+ void Offer(OfferOptions& options,
+ uint32_t offerAnswerFlags,
+ TrickleType trickleType = BOTH_TRICKLE) {
+ EnsureInit();
+ a1_->CreateOffer(options, offerAnswerFlags);
+ bool trickle = !!(trickleType & OFFERER_TRICKLES);
+ if (!trickle) {
+ a1_->pObserver->trickleCandidates = false;
+ }
+ // Candidates arriving at a2 before SetRemote are queued, not applied.
+ a2_->mRemoteDescriptionSet = false;
+ a1_->SetLocal(TestObserver::OFFER, a1_->offer());
+ if (!trickle) {
+ a1_->WaitForGather();
+ a1_->UpdateOffer();
+ SDPSanityCheck(a1_->getLocalDescription(), HAS_ALL_CANDIDATES, true);
+ }
+ a2_->SetRemote(TestObserver::OFFER, a1_->offer());
+ }
+
+ // Runs the answer half of a negotiation, mirroring Offer(): a2 creates and
+ // sets its answer; a non-trickling answerer waits for gathering and ships a
+ // fully-gathered answer to a1.
+ void Answer(OfferOptions& options,
+ uint32_t offerAnswerFlags,
+ TrickleType trickleType = BOTH_TRICKLE) {
+
+ a2_->CreateAnswer(offerAnswerFlags);
+ bool trickle = !!(trickleType & ANSWERER_TRICKLES);
+ if (!trickle) {
+ a2_->pObserver->trickleCandidates = false;
+ }
+ // Candidates arriving at a1 before SetRemote are queued, not applied.
+ a1_->mRemoteDescriptionSet = false;
+ a2_->SetLocal(TestObserver::ANSWER, a2_->answer());
+ if (!trickle) {
+ a2_->WaitForGather();
+ a2_->UpdateAnswer();
+ SDPSanityCheck(a2_->getLocalDescription(), HAS_ALL_CANDIDATES, false);
+ }
+ a1_->SetRemote(TestObserver::ANSWER, a2_->answer());
+ }
+
+ void WaitForCompleted() {
+ ASSERT_TRUE_WAIT(a1_->IceCompleted() == true, kDefaultTimeout);
+ ASSERT_TRUE_WAIT(a2_->IceCompleted() == true, kDefaultTimeout);
+ }
+
+ void OfferAnswer(OfferOptions& options,
+ uint32_t offerAnswerFlags,
+ TrickleType trickleType = BOTH_TRICKLE) {
+ EnsureInit();
+ Offer(options, offerAnswerFlags, trickleType);
+ Answer(options, offerAnswerFlags, trickleType);
+ WaitForCompleted();
+ CheckPipelines();
+ CheckStreams();
+ }
+
+ // NOTE(review): despite the name, this is currently identical to
+ // OfferAnswer() with default (both-trickle) settings -- the Chrome-style
+ // candidate encoding it presumably once exercised is gone. Candidate for
+ // removal or re-specialization.
+ void OfferAnswerTrickleChrome(OfferOptions& options,
+ uint32_t offerAnswerFlags) {
+ EnsureInit();
+ Offer(options, offerAnswerFlags);
+ Answer(options, offerAnswerFlags);
+ WaitForCompleted();
+ CheckPipelines();
+ CheckStreams();
+ }
+
+ void CreateOfferRemoveTrack(OfferOptions& options, bool videoTrack) {
+ EnsureInit();
+ OfferOptions aoptions;
+ aoptions.setInt32Option("OfferToReceiveAudio", 1);
+ aoptions.setInt32Option("OfferToReceiveVideo", 1);
+ a1_->CreateOffer(aoptions, OFFER_AV);
+ a1_->CreateOfferRemoveTrack(options, videoTrack);
+ }
+
+ void CreateOfferAudioOnly(OfferOptions& options) {
+ EnsureInit();
+ a1_->CreateOffer(options, OFFER_AUDIO);
+ }
+
+ void CreateOfferAddCandidate(OfferOptions& options,
+ const std::string& candidate, const std::string& mid,
+ unsigned short level) {
+ EnsureInit();
+ a1_->CreateOffer(options, OFFER_AV);
+ a1_->AddIceCandidate(candidate, mid, level, true);
+ }
+
+ void AddIceCandidateEarly(const std::string& candidate, const std::string& mid,
+ unsigned short level) {
+ EnsureInit();
+ a1_->AddIceCandidate(candidate, mid, level, false);
+ }
+
+ std::string SwapMsids(const std::string& sdp, bool swapVideo) const
+ {
+ SipccSdpParser parser;
+ UniquePtr<Sdp> parsed = parser.Parse(sdp);
+
+ SdpMediaSection* previousMsection = nullptr;
+ bool swapped = false;
+ for (size_t i = 0; i < parsed->GetMediaSectionCount(); ++i) {
+ SdpMediaSection* currentMsection = &parsed->GetMediaSection(i);
+ bool isVideo = currentMsection->GetMediaType() == SdpMediaSection::kVideo;
+ if (swapVideo == isVideo) {
+ if (previousMsection) {
+ UniquePtr<SdpMsidAttributeList> prevMsid(
+ new SdpMsidAttributeList(
+ previousMsection->GetAttributeList().GetMsid()));
+ UniquePtr<SdpMsidAttributeList> currMsid(
+ new SdpMsidAttributeList(
+ currentMsection->GetAttributeList().GetMsid()));
+ previousMsection->GetAttributeList().SetAttribute(currMsid.release());
+ currentMsection->GetAttributeList().SetAttribute(prevMsid.release());
+ swapped = true;
+ }
+ previousMsection = currentMsection;
+ }
+ }
+
+ EXPECT_TRUE(swapped);
+
+ return parsed->ToString();
+ }
+
+ void CheckRtcpFbSdp(const std::string &sdp,
+ const std::set<std::string>& expected) {
+
+ std::set<std::string>::const_iterator it;
+
+ // Iterate through the list of expected feedback types and ensure
+ // that none of them are missing.
+ for (it = expected.begin(); it != expected.end(); ++it) {
+ std::string attr = std::string("\r\na=rtcp-fb:120 ") + (*it) + "\r\n";
+ std::cout << " - Checking for a=rtcp-fb: '" << *it << "'" << std::endl;
+ ASSERT_NE(sdp.find(attr), std::string::npos);
+ }
+
+ // Iterate through all of the rtcp-fb lines in the SDP and ensure
+ // that all of them are expected.
+ ParsedSDP sdpWrapper(sdp);
+ std::vector<std::string> values = sdpWrapper.GetLines("a=rtcp-fb:120");
+ std::vector<std::string>::iterator it2;
+ for (it2 = values.begin(); it2 != values.end(); ++it2) {
+ std::cout << " - Verifying that rtcp-fb is okay: '" << *it2
+ << "'" << std::endl;
+ ASSERT_NE(0U, expected.count(*it2));
+ }
+ }
+
+ std::string HardcodeRtcpFb(const std::string& sdp,
+ const std::set<std::string>& feedback) {
+ ParsedSDP sdpWrapper(sdp);
+
+ // Strip out any existing rtcp-fb lines
+ sdpWrapper.DeleteLines("a=rtcp-fb:120");
+ sdpWrapper.DeleteLines("a=rtcp-fb:126");
+ sdpWrapper.DeleteLines("a=rtcp-fb:97");
+
+ // Add rtcp-fb lines for the desired feedback types
+ // We know that the video section is generated second (last),
+ // so appending these to the end of the SDP has the desired effect.
+ std::set<std::string>::const_iterator it;
+ for (it = feedback.begin(); it != feedback.end(); ++it) {
+ sdpWrapper.AddLine(std::string("a=rtcp-fb:120 ") + (*it) + "\r\n");
+ sdpWrapper.AddLine(std::string("a=rtcp-fb:126 ") + (*it) + "\r\n");
+ sdpWrapper.AddLine(std::string("a=rtcp-fb:97 ") + (*it) + "\r\n");
+ }
+
+ std::cout << "Modified SDP " << std::endl
+ << indent(sdpWrapper.getSdp()) << std::endl;
+
+ // Double-check that the offered SDP matches what we expect
+ CheckRtcpFbSdp(sdpWrapper.getSdp(), feedback);
+
+ return sdpWrapper.getSdp();
+ }
+
+  // Negotiate an A/V call, but rewrite the answerer's rtcp-fb lines to
+  // contain exactly |feedback| before handing the answer to the offerer.
+  // Verifies the offerer ends up with the expected NACK setting
+  // (|expectNack|) and frame request mechanism (|frameRequestType|).
+  void TestRtcpFbAnswer(const std::set<std::string>& feedback,
+                        bool expectNack,
+                        VideoSessionConduit::FrameRequestType frameRequestType) {
+    EnsureInit();
+    OfferOptions options;
+
+    a1_->CreateOffer(options, OFFER_AV);
+    a1_->SetLocal(TestObserver::OFFER, a1_->offer());
+
+    a2_->SetRemote(TestObserver::OFFER, a1_->offer());
+    a2_->CreateAnswer(OFFER_AV | ANSWER_AV);
+
+    a2_->SetLocal(TestObserver::ANSWER, a2_->answer());
+
+    // Only a1 sees the doctored answer; a2 keeps its pristine local copy.
+    std::string modifiedAnswer(HardcodeRtcpFb(a2_->answer(), feedback));
+
+    a1_->SetRemote(TestObserver::ANSWER, modifiedAnswer);
+
+    a1_->SetExpectedFrameRequestType(frameRequestType);
+    a1_->mExpectNack = expectNack;
+    // Since we don't support rewriting rtcp-fb in answers, a2 still thinks it
+    // will be doing all of the normal rtcp-fb
+
+    WaitForCompleted();
+    CheckPipelines();
+
+    CloseStreams();
+  }
+
+  // Like TestRtcpFbAnswer, but doctors the *offer* instead: both sides
+  // then negotiate from the modified rtcp-fb set, so the expectations
+  // are installed on both agents.
+  void TestRtcpFbOffer(
+      const std::set<std::string>& feedback,
+      bool expectNack,
+      VideoSessionConduit::FrameRequestType frameRequestType) {
+    EnsureInit();
+    OfferOptions options;
+
+    a1_->CreateOffer(options, OFFER_AV);
+    a1_->SetLocal(TestObserver::OFFER, a1_->offer());
+
+    std::string modifiedOffer = HardcodeRtcpFb(a1_->offer(), feedback);
+
+    a2_->SetRemote(TestObserver::OFFER, modifiedOffer);
+    a1_->SetExpectedFrameRequestType(frameRequestType);
+    a1_->mExpectNack = expectNack;
+    a2_->SetExpectedFrameRequestType(frameRequestType);
+    a2_->mExpectNack = expectNack;
+
+    a2_->CreateAnswer(OFFER_AV | ANSWER_AV);
+
+    a2_->SetLocal(TestObserver::ANSWER, a2_->answer());
+    a1_->SetRemote(TestObserver::ANSWER, a2_->answer());
+
+    WaitForCompleted();
+
+    CheckPipelines();
+    CloseStreams();
+  }
+
+ void SetTestStunServer() {
+ stun_addr_ = TestStunServer::GetInstance()->addr();
+ stun_port_ = TestStunServer::GetInstance()->port();
+
+ TestStunServer::GetInstance()->SetActive(false);
+ TestStunServer::GetInstance()->SetResponseAddr(
+ kBogusSrflxAddress, kBogusSrflxPort);
+ }
+
+ // Check max-fs and max-fr in SDP
+ void CheckMaxFsFrSdp(const std::string sdp,
+ int format,
+ int max_fs,
+ int max_fr) {
+ ParsedSDP sdpWrapper(sdp);
+ std::stringstream ss;
+ ss << "a=fmtp:" << format;
+ std::vector<std::string> lines = sdpWrapper.GetLines(ss.str());
+
+ // Both max-fs and max-fr not exist
+ if (lines.empty()) {
+ ASSERT_EQ(max_fs, 0);
+ ASSERT_EQ(max_fr, 0);
+ return;
+ }
+
+ // At most one instance allowed for each format
+ ASSERT_EQ(lines.size(), 1U);
+
+ std::string line = lines.front();
+
+ // Make sure that max-fs doesn't exist
+ if (max_fs == 0) {
+ ASSERT_EQ(line.find("max-fs="), std::string::npos);
+ }
+ // Check max-fs value
+ if (max_fs > 0) {
+ std::stringstream ss;
+ ss << "max-fs=" << max_fs;
+ ASSERT_NE(line.find(ss.str()), std::string::npos);
+ }
+ // Make sure that max-fr doesn't exist
+ if (max_fr == 0) {
+ ASSERT_EQ(line.find("max-fr="), std::string::npos);
+ }
+ // Check max-fr value
+ if (max_fr > 0) {
+ std::stringstream ss;
+ ss << "max-fr=" << max_fr;
+ ASSERT_NE(line.find(ss.str()), std::string::npos);
+ }
+ }
+
+  // Tear down all send and receive streams on both agents.
+  void CloseStreams()
+  {
+    a1_->CloseSendStreams();
+    a2_->CloseSendStreams();
+    a1_->CloseReceiveStreams();
+    a2_->CloseReceiveStreams();
+  }
+
+ protected:
+  bool init_;                    // one-shot init flag (presumably set by EnsureInit(); body not shown here)
+  UniquePtr<SignalingAgent> a1_; // Canonically "caller"
+  UniquePtr<SignalingAgent> a2_; // Canonically "callee"
+  std::string stun_addr_;        // mock STUN server address, captured by SetTestStunServer()
+  uint16_t stun_port_;           // mock STUN server port, captured by SetTestStunServer()
+};
+
+// Write integer pref |pref_name| = |new_value|.  Asserts it is running on
+// the main thread; callers dispatch it there (see SetMaxFsFr).
+static void SetIntPrefOnMainThread(nsCOMPtr<nsIPrefBranch> prefs,
+                                  const char *pref_name,
+                                  int new_value) {
+  MOZ_ASSERT(NS_IsMainThread());
+  prefs->SetIntPref(pref_name, new_value);
+}
+
+static void SetMaxFsFr(nsCOMPtr<nsIPrefBranch> prefs,
+ int max_fs,
+ int max_fr) {
+ gMainThread->Dispatch(
+ WrapRunnableNM(SetIntPrefOnMainThread,
+ prefs,
+ "media.navigator.video.max_fs",
+ max_fs),
+ NS_DISPATCH_SYNC);
+
+ gMainThread->Dispatch(
+ WrapRunnableNM(SetIntPrefOnMainThread,
+ prefs,
+ "media.navigator.video.max_fr",
+ max_fr),
+ NS_DISPATCH_SYNC);
+}
+
+class FsFrPrefClearer {
+ public:
+ explicit FsFrPrefClearer(nsCOMPtr<nsIPrefBranch> prefs): mPrefs(prefs) {}
+ ~FsFrPrefClearer() {
+ gMainThread->Dispatch(
+ WrapRunnableNM(FsFrPrefClearer::ClearUserPrefOnMainThread,
+ mPrefs,
+ "media.navigator.video.max_fs"),
+ NS_DISPATCH_SYNC);
+ gMainThread->Dispatch(
+ WrapRunnableNM(FsFrPrefClearer::ClearUserPrefOnMainThread,
+ mPrefs,
+ "media.navigator.video.max_fr"),
+ NS_DISPATCH_SYNC);
+ }
+
+ static void ClearUserPrefOnMainThread(nsCOMPtr<nsIPrefBranch> prefs,
+ const char *pref_name) {
+ MOZ_ASSERT(NS_IsMainThread());
+ prefs->ClearUserPref(pref_name);
+ }
+ private:
+ nsCOMPtr<nsIPrefBranch> mPrefs;
+};
+
+// Fixture smoke test: parameterized setup/teardown alone must succeed.
+TEST_P(SignalingTest, JustInit)
+{
+}
+
+TEST_P(SignalingTest, CreateSetOffer)
+{
+  OfferOptions options;
+  CreateSetOffer(options);
+}
+
+TEST_P(SignalingTest, CreateOfferAudioVideoOptionUndefined)
+{
+  OfferOptions options;
+  CreateOffer(options, OFFER_AV);
+}
+
+// The following tests walk the OfferToReceiveAudio/Video option matrix
+// against varying sets of locally-added streams.
+TEST_P(SignalingTest, CreateOfferNoVideoStreamRecvVideo)
+{
+  OfferOptions options;
+  options.setInt32Option("OfferToReceiveAudio", 1);
+  options.setInt32Option("OfferToReceiveVideo", 1);
+  CreateOffer(options, OFFER_AUDIO);
+}
+
+TEST_P(SignalingTest, CreateOfferNoAudioStreamRecvAudio)
+{
+  OfferOptions options;
+  options.setInt32Option("OfferToReceiveAudio", 1);
+  options.setInt32Option("OfferToReceiveVideo", 1);
+  CreateOffer(options, OFFER_VIDEO);
+}
+
+TEST_P(SignalingTest, CreateOfferNoVideoStream)
+{
+  OfferOptions options;
+  options.setInt32Option("OfferToReceiveAudio", 1);
+  options.setInt32Option("OfferToReceiveVideo", 0);
+  CreateOffer(options, OFFER_AUDIO);
+}
+
+TEST_P(SignalingTest, CreateOfferNoAudioStream)
+{
+  OfferOptions options;
+  options.setInt32Option("OfferToReceiveAudio", 0);
+  options.setInt32Option("OfferToReceiveVideo", 1);
+  CreateOffer(options, OFFER_VIDEO);
+}
+
+TEST_P(SignalingTest, CreateOfferDontReceiveAudio)
+{
+  OfferOptions options;
+  options.setInt32Option("OfferToReceiveAudio", 0);
+  options.setInt32Option("OfferToReceiveVideo", 1);
+  CreateOffer(options, OFFER_AV);
+}
+
+TEST_P(SignalingTest, CreateOfferDontReceiveVideo)
+{
+  OfferOptions options;
+  options.setInt32Option("OfferToReceiveAudio", 1);
+  options.setInt32Option("OfferToReceiveVideo", 0);
+  CreateOffer(options, OFFER_AV);
+}
+
+// Track-removal variants: second argument selects audio (false) or
+// video (true) track removal.
+TEST_P(SignalingTest, CreateOfferRemoveAudioTrack)
+{
+  OfferOptions options;
+  options.setInt32Option("OfferToReceiveAudio", 1);
+  options.setInt32Option("OfferToReceiveVideo", 1);
+  CreateOfferRemoveTrack(options, false);
+}
+
+TEST_P(SignalingTest, CreateOfferDontReceiveAudioRemoveAudioTrack)
+{
+  OfferOptions options;
+  options.setInt32Option("OfferToReceiveAudio", 0);
+  options.setInt32Option("OfferToReceiveVideo", 1);
+  CreateOfferRemoveTrack(options, false);
+}
+
+TEST_P(SignalingTest, CreateOfferDontReceiveVideoRemoveVideoTrack)
+{
+  OfferOptions options;
+  options.setInt32Option("OfferToReceiveAudio", 1);
+  options.setInt32Option("OfferToReceiveVideo", 0);
+  CreateOfferRemoveTrack(options, true);
+}
+
+// Full offer/answer exchanges under the various trickle-ICE modes.
+TEST_P(SignalingTest, OfferAnswerNothingDisabled)
+{
+  OfferOptions options;
+  OfferAnswer(options, OFFER_AV | ANSWER_AV);
+}
+
+TEST_P(SignalingTest, OfferAnswerNoTrickle)
+{
+  OfferOptions options;
+  OfferAnswer(options, OFFER_AV | ANSWER_AV, NO_TRICKLE);
+}
+
+TEST_P(SignalingTest, OfferAnswerOffererTrickles)
+{
+  OfferOptions options;
+  OfferAnswer(options, OFFER_AV | ANSWER_AV, OFFERER_TRICKLES);
+}
+
+TEST_P(SignalingTest, OfferAnswerAnswererTrickles)
+{
+  OfferOptions options;
+  OfferAnswer(options, OFFER_AV | ANSWER_AV, ANSWERER_TRICKLES);
+}
+
+TEST_P(SignalingTest, OfferAnswerBothTrickle)
+{
+  OfferOptions options;
+  OfferAnswer(options, OFFER_AV | ANSWER_AV, BOTH_TRICKLE);
+}
+
+TEST_P(SignalingTest, OfferAnswerAudioBothTrickle)
+{
+  OfferOptions options;
+  OfferAnswer(options, OFFER_AUDIO | ANSWER_AUDIO, BOTH_TRICKLE);
+}
+
+
+TEST_P(SignalingTest, OfferAnswerNothingDisabledFullCycle)
+{
+  OfferOptions options;
+  OfferAnswer(options, OFFER_AV | ANSWER_AV);
+  // verify the default codec priorities
+  ASSERT_NE(a1_->getLocalDescription().find("UDP/TLS/RTP/SAVPF 109 9 0 8\r"),
+            std::string::npos);
+  ASSERT_NE(a2_->getLocalDescription().find("UDP/TLS/RTP/SAVPF 109\r"),
+            std::string::npos);
+}
+
+// "Inactive" here: the offerer asks to receive both media types but only
+// one is actually negotiated on the answerer side.
+TEST_P(SignalingTest, OfferAnswerAudioInactive)
+{
+  OfferOptions options;
+  options.setInt32Option("OfferToReceiveAudio", 1);
+  options.setInt32Option("OfferToReceiveVideo", 1);
+  OfferAnswer(options, OFFER_VIDEO | ANSWER_VIDEO);
+}
+
+TEST_P(SignalingTest, OfferAnswerVideoInactive)
+{
+  OfferOptions options;
+  options.setInt32Option("OfferToReceiveAudio", 1);
+  options.setInt32Option("OfferToReceiveVideo", 1);
+  OfferAnswer(options, OFFER_AUDIO | ANSWER_AUDIO);
+  CloseStreams();
+}
+
+TEST_P(SignalingTest, CreateOfferAddCandidate)
+{
+  OfferOptions options;
+  CreateOfferAddCandidate(options, strSampleCandidate,
+                          strSampleMid, nSamplelevel);
+}
+
+TEST_P(SignalingTest, AddIceCandidateEarly)
+{
+  OfferOptions options;
+  AddIceCandidateEarly(strSampleCandidate,
+                       strSampleMid, nSamplelevel);
+}
+
+TEST_P(SignalingTest, OfferAnswerDontAddAudioStreamOnAnswerNoOptions)
+{
+  OfferOptions options;
+  options.setInt32Option("OfferToReceiveAudio", 1);
+  options.setInt32Option("OfferToReceiveVideo", 1);
+  OfferAnswer(options, OFFER_AV | ANSWER_VIDEO);
+}
+
+TEST_P(SignalingTest, OfferAnswerDontAddVideoStreamOnAnswerNoOptions)
+{
+  OfferOptions options;
+  options.setInt32Option("OfferToReceiveAudio", 1);
+  options.setInt32Option("OfferToReceiveVideo", 1);
+  OfferAnswer(options, OFFER_AV | ANSWER_AUDIO);
+}
+
+TEST_P(SignalingTest, OfferAnswerDontAddAudioVideoStreamsOnAnswerNoOptions)
+{
+  OfferOptions options;
+  options.setInt32Option("OfferToReceiveAudio", 1);
+  options.setInt32Option("OfferToReceiveVideo", 1);
+  OfferAnswer(options, OFFER_AV | ANSWER_NONE);
+}
+
+// Renegotiation tests: perform an initial A/V negotiation, mutate the
+// track set on one side, then renegotiate.
+TEST_P(SignalingTest, RenegotiationOffererAddsTracks)
+{
+  OfferOptions options;
+  OfferAnswer(options, OFFER_AV | ANSWER_AV);
+  // OFFER_AV causes a new stream + tracks to be added
+  OfferAnswer(options, OFFER_AV);
+  CloseStreams();
+}
+
+TEST_P(SignalingTest, RenegotiationOffererRemovesTrack)
+{
+  OfferOptions options;
+  OfferAnswer(options, OFFER_AV | ANSWER_AV);
+
+  a1_->RemoveTrack(0, false);
+
+  OfferAnswer(options, OFFER_NONE);
+
+  CloseStreams();
+}
+
+TEST_P(SignalingTest, RenegotiationBothRemoveThenAddTrack)
+{
+  OfferOptions options;
+  OfferAnswer(options, OFFER_AV | ANSWER_AV);
+
+  a1_->RemoveTrack(0, false);
+  a2_->RemoveTrack(0, false);
+
+  OfferAnswer(options, OFFER_NONE);
+
+  // OFFER_AUDIO causes a new audio track to be added on both sides
+  OfferAnswer(options, OFFER_AUDIO);
+
+  CloseStreams();
+}
+
+TEST_P(SignalingTest, RenegotiationOffererReplacesTrack)
+{
+  OfferOptions options;
+  OfferAnswer(options, OFFER_AV | ANSWER_AV);
+
+  a1_->RemoveTrack(0, false);
+
+  // OFFER_AUDIO causes a new audio track to be added on both sides
+  OfferAnswer(options, OFFER_AUDIO);
+
+  CloseStreams();
+}
+
+// Exercises renegotiation where the msid attributes in the reoffer are
+// swapped between m-sections (audio pair first, then video pair too),
+// to verify the answerer tolerates tracks moving between streams.
+TEST_P(SignalingTest, RenegotiationOffererSwapsMsids)
+{
+  OfferOptions options;
+
+  EnsureInit();
+  a1_->AddStream(DOMMediaStream::HINT_CONTENTS_AUDIO |
+                 DOMMediaStream::HINT_CONTENTS_VIDEO);
+
+  OfferAnswer(options, OFFER_AV | ANSWER_AV);
+
+  a1_->CreateOffer(options, OFFER_NONE);
+  a1_->SetLocal(TestObserver::OFFER, a1_->offer());
+  std::string audioSwapped = SwapMsids(a1_->offer(), false);
+  std::string audioAndVideoSwapped = SwapMsids(audioSwapped, true);
+  std::cout << "Msids swapped: " << std::endl << audioAndVideoSwapped << std::endl;
+  a2_->SetRemote(TestObserver::OFFER, audioAndVideoSwapped);
+  Answer(options, OFFER_NONE, BOTH_TRICKLE);
+  WaitForCompleted();
+
+  CheckPipelines();
+  CheckStreams();
+
+  CloseStreams();
+}
+
+TEST_P(SignalingTest, RenegotiationAnswererAddsTracks)
+{
+  OfferOptions options;
+  OfferAnswer(options, OFFER_AV | ANSWER_AV);
+
+  // Ask to receive a second track of each type from the answerer.
+  options.setInt32Option("OfferToReceiveAudio", 2);
+  options.setInt32Option("OfferToReceiveVideo", 2);
+
+  // ANSWER_AV causes a new stream + tracks to be added
+  OfferAnswer(options, ANSWER_AV);
+
+  CloseStreams();
+}
+
+TEST_P(SignalingTest, RenegotiationAnswererRemovesTrack)
+{
+  OfferOptions options;
+  OfferAnswer(options, OFFER_AV | ANSWER_AV);
+
+  a2_->RemoveTrack(0, false);
+
+  OfferAnswer(options, OFFER_NONE);
+
+  CloseStreams();
+}
+
+TEST_P(SignalingTest, RenegotiationAnswererReplacesTrack)
+{
+  OfferOptions options;
+  OfferAnswer(options, OFFER_AV | ANSWER_AV);
+
+  a2_->RemoveTrack(0, false);
+
+  // ANSWER_AUDIO causes a new audio track to be added
+  OfferAnswer(options, ANSWER_AUDIO);
+
+  CloseStreams();
+}
+
+// Toggle bundle between the first negotiation and the renegotiation.
+TEST_P(SignalingTest, BundleRenegotiation)
+{
+  OfferOptions options;
+  OfferAnswer(options, OFFER_AV | ANSWER_AV);
+
+  // If we did bundle before, turn it off, if not, turn it on
+  if (a1_->mBundleEnabled && a2_->mBundleEnabled) {
+    a1_->SetBundleEnabled(false);
+  } else {
+    a1_->SetBundleEnabled(true);
+    a2_->SetBundleEnabled(true);
+  }
+
+  OfferAnswer(options, OFFER_NONE);
+}
+
+TEST_P(SignalingTest, FullCallAudioOnly)
+{
+  OfferOptions options;
+  OfferAnswer(options, OFFER_AUDIO | ANSWER_AUDIO);
+
+  CloseStreams();
+}
+
+TEST_P(SignalingTest, FullCallVideoOnly)
+{
+  OfferOptions options;
+  OfferAnswer(options, OFFER_VIDEO | ANSWER_VIDEO);
+
+  CloseStreams();
+}
+
+// The offerer must cope with an answer that lists a codec (PCMA) that it
+// did not negotiate in addition to the ones it did.
+TEST_P(SignalingTest, OfferAndAnswerWithExtraCodec)
+{
+  EnsureInit();
+  OfferOptions options;
+  Offer(options, OFFER_AUDIO);
+
+  a2_->CreateAnswer(OFFER_AUDIO | ANSWER_AUDIO);
+  a2_->SetLocal(TestObserver::ANSWER, a2_->answer());
+  ParsedSDP sdpWrapper(a2_->answer());
+  sdpWrapper.ReplaceLine("m=audio",
+                        "m=audio 65375 UDP/TLS/RTP/SAVPF 109 8\r\n");
+  sdpWrapper.AddLine("a=rtpmap:8 PCMA/8000\r\n");
+  std::cout << "Modified SDP " << std::endl
+            << indent(sdpWrapper.getSdp()) << std::endl;
+
+  a1_->SetRemote(TestObserver::ANSWER, sdpWrapper.getSdp());
+
+  WaitForCompleted();
+
+  CheckPipelines();
+  CheckStreams();
+
+  CloseStreams();
+}
+
+TEST_P(SignalingTest, FullCallTrickle)
+{
+  OfferOptions options;
+  OfferAnswer(options, OFFER_AV | ANSWER_AV);
+
+  std::cerr << "ICE handshake completed" << std::endl;
+
+  CloseStreams();
+}
+
+// Offer answer with trickle but with chrome-style candidates
+TEST_P(SignalingTest, DISABLED_FullCallTrickleChrome)
+{
+  OfferOptions options;
+  OfferAnswerTrickleChrome(options, OFFER_AV | ANSWER_AV);
+
+  std::cerr << "ICE handshake completed" << std::endl;
+
+  CloseStreams();
+}
+
+TEST_P(SignalingTest, FullCallTrickleBeforeSetLocal)
+{
+  OfferOptions options;
+  Offer(options, OFFER_AV | ANSWER_AV);
+  // ICE will succeed even if one side fails to trickle, so we need to disable
+  // one side before performing a test that might cause candidates to be
+  // dropped
+  a2_->DropOutgoingTrickleCandidates();
+  // Wait until all of a1's candidates have been trickled to a2, _before_ a2
+  // has called CreateAnswer/SetLocal (ie; the ICE stack is not running yet)
+  a1_->WaitForGather();
+  Answer(options, OFFER_AV | ANSWER_AV);
+  WaitForCompleted();
+
+  CheckPipelines();
+  CheckStreams();
+
+  std::cerr << "ICE handshake completed" << std::endl;
+
+  CloseStreams();
+}
+
+// This test comes from Bug 810220
+// TODO: Move this to jsep_session_unittest
+TEST_P(SignalingTest, AudioOnlyG711Call)
+{
+  EnsureInit();
+
+  OfferOptions options;
+  const std::string& offer(strG711SdpOffer);
+
+  std::cout << "Setting offer to:" << std::endl << indent(offer) << std::endl;
+  a2_->SetRemote(TestObserver::OFFER, offer);
+
+  std::cout << "Creating answer:" << std::endl;
+  a2_->CreateAnswer(OFFER_AUDIO | ANSWER_AUDIO);
+
+  std::string answer = a2_->answer();
+
+  // They didn't offer opus, so our answer shouldn't include it.
+  ASSERT_EQ(answer.find(" opus/"), std::string::npos);
+
+  // They also didn't offer video or application
+  ASSERT_EQ(answer.find("video"), std::string::npos);
+  ASSERT_EQ(answer.find("application"), std::string::npos);
+
+  // We should answer with PCMU and telephone-event
+  ASSERT_NE(answer.find(" PCMU/8000"), std::string::npos);
+
+  // Double-check the directionality
+  ASSERT_NE(answer.find("\r\na=sendrecv"), std::string::npos);
+
+}
+
+// An ice-lite offer must make our side take the ICE controlling role.
+TEST_P(SignalingTest, IncomingOfferIceLite)
+{
+  EnsureInit();
+
+  std::string offer =
+    "v=0\r\n"
+    "o=- 1936463 1936463 IN IP4 148.147.200.251\r\n"
+    "s=-\r\n"
+    "c=IN IP4 148.147.200.251\r\n"
+    "t=0 0\r\n"
+    "a=ice-lite\r\n"
+    "a=fingerprint:sha-1 "
+      "E7:FA:17:DA:3F:3C:1E:D8:E4:9C:8C:4C:13:B9:2E:D5:C6:78:AB:B3\r\n"
+    "m=audio 40014 UDP/TLS/RTP/SAVPF 8 0 101\r\n"
+    "a=rtpmap:8 PCMA/8000\r\n"
+    "a=rtpmap:0 PCMU/8000\r\n"
+    "a=rtpmap:101 telephone-event/8000\r\n"
+    "a=fmtp:101 0-15\r\n"
+    "a=ptime:20\r\n"
+    "a=sendrecv\r\n"
+    "a=ice-ufrag:bf2LAgqBZdiWFR2r\r\n"
+    "a=ice-pwd:ScxgaNzdBOYScR0ORleAvt1x\r\n"
+    "a=candidate:1661181211 1 udp 10 148.147.200.251 40014 typ host\r\n"
+    "a=candidate:1661181211 2 udp 9 148.147.200.251 40015 typ host\r\n"
+    "a=setup:actpass\r\n";
+
+  std::cout << "Setting offer to:" << std::endl << indent(offer) << std::endl;
+  a2_->SetRemote(TestObserver::OFFER, offer);
+
+  std::cout << "Creating answer:" << std::endl;
+  a2_->CreateAnswer(OFFER_AUDIO | ANSWER_AUDIO);
+  a2_->SetLocal(TestObserver::ANSWER, a2_->answer());
+
+  ASSERT_EQ(a2_->pc->media()->ice_ctx()->GetControlling(),
+            NrIceCtx::ICE_CONTROLLING);
+}
+
+// This test comes from Bug814038
+TEST_P(SignalingTest, ChromeOfferAnswer)
+{
+  EnsureInit();
+
+  // This is captured SDP from an early interop attempt with Chrome.
+  std::string offer =
+    "v=0\r\n"
+    "o=- 1713781661 2 IN IP4 127.0.0.1\r\n"
+    "s=-\r\n"
+    "t=0 0\r\n"
+    "a=group:BUNDLE audio video\r\n"
+
+    "m=audio 1 UDP/TLS/RTP/SAVPF 103 104 111 0 8 107 106 105 13 126\r\n"
+    "a=fingerprint:sha-1 4A:AD:B9:B1:3F:82:18:3B:54:02:12:DF:3E:"
+      "5D:49:6B:19:E5:7C:AB\r\n"
+    "a=setup:active\r\n"
+    "c=IN IP4 0.0.0.0\r\n"
+    "a=rtcp:1 IN IP4 0.0.0.0\r\n"
+    "a=ice-ufrag:lBrbdDfrVBH1cldN\r\n"
+    "a=ice-pwd:rzh23jet4QpCaEoj9Sl75pL3\r\n"
+    "a=ice-options:google-ice\r\n"
+    "a=sendrecv\r\n"
+    "a=mid:audio\r\n"
+    "a=rtcp-mux\r\n"
+    "a=crypto:1 AES_CM_128_HMAC_SHA1_80 inline:"
+      "RzrYlzpkTsvgYFD1hQqNCzQ7y4emNLKI1tODsjim\r\n"
+    "a=rtpmap:103 ISAC/16000\r\n"
+    "a=rtpmap:104 ISAC/32000\r\n"
+    // NOTE: the actual SDP that Chrome sends at the moment
+    // doesn't indicate two channels. I've amended their SDP
+    // here, under the assumption that the constraints
+    // described in draft-spittka-payload-rtp-opus will
+    // eventually be implemented by Google.
+    "a=rtpmap:111 opus/48000/2\r\n"
+    "a=rtpmap:0 PCMU/8000\r\n"
+    "a=rtpmap:8 PCMA/8000\r\n"
+    "a=rtpmap:107 CN/48000\r\n"
+    "a=rtpmap:106 CN/32000\r\n"
+    "a=rtpmap:105 CN/16000\r\n"
+    "a=rtpmap:13 CN/8000\r\n"
+    "a=rtpmap:126 telephone-event/8000\r\n"
+    "a=ssrc:661333377 cname:KIXaNxUlU5DP3fVS\r\n"
+    "a=ssrc:661333377 msid:A5UL339RyGxT7zwgyF12BFqesxkmbUsaycp5 a0\r\n"
+    "a=ssrc:661333377 mslabel:A5UL339RyGxT7zwgyF12BFqesxkmbUsaycp5\r\n"
+    "a=ssrc:661333377 label:A5UL339RyGxT7zwgyF12BFqesxkmbUsaycp5a0\r\n"
+
+    "m=video 1 UDP/TLS/RTP/SAVPF 100 101 102\r\n"
+    "a=fingerprint:sha-1 4A:AD:B9:B1:3F:82:18:3B:54:02:12:DF:3E:5D:49:"
+      "6B:19:E5:7C:AB\r\n"
+    "a=setup:active\r\n"
+    "c=IN IP4 0.0.0.0\r\n"
+    "a=rtcp:1 IN IP4 0.0.0.0\r\n"
+    "a=ice-ufrag:lBrbdDfrVBH1cldN\r\n"
+    "a=ice-pwd:rzh23jet4QpCaEoj9Sl75pL3\r\n"
+    "a=ice-options:google-ice\r\n"
+    "a=sendrecv\r\n"
+    "a=mid:video\r\n"
+    "a=rtcp-mux\r\n"
+    "a=crypto:1 AES_CM_128_HMAC_SHA1_80 inline:"
+      "RzrYlzpkTsvgYFD1hQqNCzQ7y4emNLKI1tODsjim\r\n"
+    "a=rtpmap:100 VP8/90000\r\n"
+    "a=rtpmap:101 red/90000\r\n"
+    "a=rtpmap:102 ulpfec/90000\r\n"
+    "a=rtcp-fb:100 nack\r\n"
+    "a=rtcp-fb:100 ccm fir\r\n"
+    "a=ssrc:3012607008 cname:KIXaNxUlU5DP3fVS\r\n"
+    "a=ssrc:3012607008 msid:A5UL339RyGxT7zwgyF12BFqesxkmbUsaycp5 v0\r\n"
+    "a=ssrc:3012607008 mslabel:A5UL339RyGxT7zwgyF12BFqesxkmbUsaycp5\r\n"
+    "a=ssrc:3012607008 label:A5UL339RyGxT7zwgyF12BFqesxkmbUsaycp5v0\r\n";
+
+
+  std::cout << "Setting offer to:" << std::endl << indent(offer) << std::endl;
+  a2_->SetRemote(TestObserver::OFFER, offer);
+
+  std::cout << "Creating answer:" << std::endl;
+  a2_->CreateAnswer(OFFER_AUDIO | ANSWER_AUDIO);
+
+  // NOTE(review): |answer| is fetched but never asserted on; this test only
+  // checks that SetRemote/CreateAnswer succeed on the captured Chrome SDP.
+  std::string answer = a2_->answer();
+}
+
+
+// End-to-end SetRemote/CreateAnswer/SetLocal against a captured Chrome
+// offer; verifies we answer with opus (payload type 111).
+TEST_P(SignalingTest, FullChromeHandshake)
+{
+  EnsureInit();
+
+  std::string offer = "v=0\r\n"
+      "o=- 3835809413 2 IN IP4 127.0.0.1\r\n"
+      "s=-\r\n"
+      "t=0 0\r\n"
+      "a=group:BUNDLE audio video\r\n"
+      "a=msid-semantic: WMS ahheYQXHFU52slYMrWNtKUyHCtWZsOJgjlOH\r\n"
+      "m=audio 1 UDP/TLS/RTP/SAVPF 103 104 111 0 8 107 106 105 13 126\r\n"
+      "c=IN IP4 1.1.1.1\r\n"
+      "a=rtcp:1 IN IP4 1.1.1.1\r\n"
+      "a=ice-ufrag:jz9UBk9RT8eCQXiL\r\n"
+      "a=ice-pwd:iscXxsdU+0gracg0g5D45orx\r\n"
+      "a=ice-options:google-ice\r\n"
+      "a=fingerprint:sha-256 A8:76:8C:4C:FA:2E:67:D7:F8:1D:28:4E:90:24:04:"
+      "12:EB:B4:A6:69:3D:05:92:E4:91:C3:EA:F9:B7:54:D3:09\r\n"
+      "a=setup:active\r\n"
+      "a=sendrecv\r\n"
+      "a=mid:audio\r\n"
+      "a=rtcp-mux\r\n"
+      "a=crypto:1 AES_CM_128_HMAC_SHA1_80 inline:/he/v44FKu/QvEhex86zV0pdn2V"
+      "4Y7wB2xaZ8eUy\r\n"
+      "a=rtpmap:103 ISAC/16000\r\n"
+      "a=rtpmap:104 ISAC/32000\r\n"
+      "a=rtpmap:111 opus/48000/2\r\n"
+      "a=rtpmap:0 PCMU/8000\r\n"
+      "a=rtpmap:8 PCMA/8000\r\n"
+      "a=rtpmap:107 CN/48000\r\n"
+      "a=rtpmap:106 CN/32000\r\n"
+      "a=rtpmap:105 CN/16000\r\n"
+      "a=rtpmap:13 CN/8000\r\n"
+      "a=rtpmap:126 telephone-event/8000\r\n"
+      "a=ssrc:3389377748 cname:G5I+Jxz4rcaq8IIK\r\n"
+      "a=ssrc:3389377748 msid:ahheYQXHFU52slYMrWNtKUyHCtWZsOJgjlOH a0\r\n"
+      "a=ssrc:3389377748 mslabel:ahheYQXHFU52slYMrWNtKUyHCtWZsOJgjlOH\r\n"
+      "a=ssrc:3389377748 label:ahheYQXHFU52slYMrWNtKUyHCtWZsOJgjlOHa0\r\n"
+      "m=video 1 UDP/TLS/RTP/SAVPF 100 116 117\r\n"
+      "c=IN IP4 1.1.1.1\r\n"
+      "a=rtcp:1 IN IP4 1.1.1.1\r\n"
+      "a=ice-ufrag:jz9UBk9RT8eCQXiL\r\n"
+      "a=ice-pwd:iscXxsdU+0gracg0g5D45orx\r\n"
+      "a=ice-options:google-ice\r\n"
+      "a=fingerprint:sha-256 A8:76:8C:4C:FA:2E:67:D7:F8:1D:28:4E:90:24:04:"
+      "12:EB:B4:A6:69:3D:05:92:E4:91:C3:EA:F9:B7:54:D3:09\r\n"
+      "a=setup:active\r\n"
+      "a=sendrecv\r\n"
+      "a=mid:video\r\n"
+      "a=rtcp-mux\r\n"
+      "a=crypto:1 AES_CM_128_HMAC_SHA1_80 inline:/he/v44FKu/QvEhex86zV0pdn2V"
+      "4Y7wB2xaZ8eUy\r\n"
+      "a=rtpmap:100 VP8/90000\r\n"
+      "a=rtpmap:116 red/90000\r\n"
+      "a=rtpmap:117 ulpfec/90000\r\n"
+      "a=ssrc:3613537198 cname:G5I+Jxz4rcaq8IIK\r\n"
+      "a=ssrc:3613537198 msid:ahheYQXHFU52slYMrWNtKUyHCtWZsOJgjlOH v0\r\n"
+      "a=ssrc:3613537198 mslabel:ahheYQXHFU52slYMrWNtKUyHCtWZsOJgjlOH\r\n"
+      "a=ssrc:3613537198 label:ahheYQXHFU52slYMrWNtKUyHCtWZsOJgjlOHv0\r\n";
+
+  std::cout << "Setting offer to:" << std::endl << indent(offer) << std::endl;
+  a2_->SetRemote(TestObserver::OFFER, offer);
+
+  std::cout << "Creating answer:" << std::endl;
+  a2_->CreateAnswer(OFFER_AUDIO | ANSWER_AUDIO);
+
+  std::cout << "Setting answer" << std::endl;
+  a2_->SetLocal(TestObserver::ANSWER, a2_->answer());
+
+  std::string answer = a2_->answer();
+  ASSERT_NE(answer.find("111 opus/"), std::string::npos);
+}
+
+// Disabled pending resolution of bug 818640.
+// Actually, this test is completely broken; you can't just call
+// SetRemote/CreateAnswer over and over again.
+// If we were to test this sort of thing, it would belong in
+// jsep_session_unitest
+TEST_P(SignalingTest, DISABLED_OfferAllDynamicTypes)
+{
+  EnsureInit();
+
+  std::string offer;
+  for (int i = 96; i < 128; i++)
+  {
+    std::stringstream ss;
+    ss << i;
+    std::cout << "Trying dynamic pt = " << i << std::endl;
+    offer =
+      "v=0\r\n"
+      "o=- 1 1 IN IP4 148.147.200.251\r\n"
+      "s=-\r\n"
+      "b=AS:64\r\n"
+      "t=0 0\r\n"
+      "a=fingerprint:sha-256 F3:FA:20:C0:CD:48:C4:5F:02:5F:A5:D3:21:D0:2D:48:"
+        "7B:31:60:5C:5A:D8:0D:CD:78:78:6C:6D:CE:CC:0C:67\r\n"
+      "m=audio 9000 RTP/AVP " + ss.str() + "\r\n"
+      "c=IN IP4 148.147.200.251\r\n"
+      "b=TIAS:64000\r\n"
+      "a=rtpmap:" + ss.str() +" opus/48000/2\r\n"
+      "a=candidate:0 1 udp 2130706432 148.147.200.251 9000 typ host\r\n"
+      "a=candidate:0 2 udp 2130706432 148.147.200.251 9005 typ host\r\n"
+      "a=ice-ufrag:cYuakxkEKH+RApYE\r\n"
+      "a=ice-pwd:bwtpzLZD+3jbu8vQHvEa6Xuq\r\n"
+      "a=sendrecv\r\n";
+
+      /*
+      std::cout << "Setting offer to:" << std::endl
+                << indent(offer) << std::endl;
+      */
+      a2_->SetRemote(TestObserver::OFFER, offer);
+
+      //std::cout << "Creating answer:" << std::endl;
+      a2_->CreateAnswer(OFFER_AUDIO | ANSWER_AUDIO);
+
+      std::string answer = a2_->answer();
+
+      ASSERT_NE(answer.find(ss.str() + " opus/"), std::string::npos);
+  }
+
+}
+
+// An offer with the wildcard connection address 0.0.0.0 must still be
+// answerable with a sendrecv direction.
+// TODO: Move to jsep_session_unittest
+TEST_P(SignalingTest, ipAddrAnyOffer)
+{
+  EnsureInit();
+
+  std::string offer =
+    "v=0\r\n"
+    "o=- 1 1 IN IP4 127.0.0.1\r\n"
+    "s=-\r\n"
+    "b=AS:64\r\n"
+    "t=0 0\r\n"
+    "a=fingerprint:sha-256 F3:FA:20:C0:CD:48:C4:5F:02:5F:A5:D3:21:D0:2D:48:"
+      "7B:31:60:5C:5A:D8:0D:CD:78:78:6C:6D:CE:CC:0C:67\r\n"
+    "m=audio 9000 RTP/AVP 99\r\n"
+    "c=IN IP4 0.0.0.0\r\n"
+    "a=rtpmap:99 opus/48000/2\r\n"
+    "a=ice-ufrag:cYuakxkEKH+RApYE\r\n"
+    "a=ice-pwd:bwtpzLZD+3jbu8vQHvEa6Xuq\r\n"
+    "a=setup:active\r\n"
+    "a=sendrecv\r\n";
+
+  a2_->SetRemote(TestObserver::OFFER, offer);
+  ASSERT_TRUE(a2_->pObserver->state == TestObserver::stateSuccess);
+  a2_->CreateAnswer(OFFER_AUDIO | ANSWER_AUDIO);
+  ASSERT_TRUE(a2_->pObserver->state == TestObserver::stateSuccess);
+  std::string answer = a2_->answer();
+  ASSERT_NE(answer.find("a=sendrecv"), std::string::npos);
+}
+
// Build a canned audio-only SDP offer whose o= line carries |number| as
// both the session id and the session version, for probing how large
// origin-line values are parsed.
static void CreateSDPForBigOTests(std::string& offer, const std::string& number) {
  const std::string originLine =
      "o=- " + number + " " + number + " IN IP4 127.0.0.1\r\n";

  offer = "v=0\r\n";
  offer += originLine;
  offer +=
      "s=-\r\n"
      "b=AS:64\r\n"
      "t=0 0\r\n"
      "a=fingerprint:sha-256 F3:FA:20:C0:CD:48:C4:5F:02:5F:A5:D3:21:D0:2D:48:"
      "7B:31:60:5C:5A:D8:0D:CD:78:78:6C:6D:CE:CC:0C:67\r\n"
      "m=audio 9000 RTP/AVP 99\r\n"
      "c=IN IP4 0.0.0.0\r\n"
      "a=rtpmap:99 opus/48000/2\r\n"
      "a=ice-ufrag:cYuakxkEKH+RApYE\r\n"
      "a=ice-pwd:bwtpzLZD+3jbu8vQHvEa6Xuq\r\n"
      "a=setup:active\r\n"
      "a=sendrecv\r\n";
}
+
+// TODO: Move to jsep_session_unittest
+TEST_P(SignalingTest, BigOValues)
+{
+  EnsureInit();
+
+  std::string offer;
+
+  // 17 digits: large but still representable; must parse successfully.
+  CreateSDPForBigOTests(offer, "12345678901234567");
+
+  a2_->SetRemote(TestObserver::OFFER, offer);
+  ASSERT_EQ(a2_->pObserver->state, TestObserver::stateSuccess);
+}
+
+// TODO: Move to jsep_session_unittest
+// We probably need to retain at least one test case for each API entry point
+// that verifies that errors are propagated correctly, though.
+TEST_P(SignalingTest, BigOValuesExtraChars)
+{
+  EnsureInit();
+
+  std::string offer;
+
+  CreateSDPForBigOTests(offer, "12345678901234567FOOBAR");
+
+  // The signaling state will remain "stable" because the unparsable
+  // SDP leads to a failure in SetRemoteDescription.
+  a2_->SetRemote(TestObserver::OFFER, offer, true,
+                 PCImplSignalingState::SignalingStable);
+  ASSERT_TRUE(a2_->pObserver->state == TestObserver::stateError);
+}
+
+// TODO: Move to jsep_session_unittest
+TEST_P(SignalingTest, BigOValuesTooBig)
+{
+  EnsureInit();
+
+  std::string offer;
+
+  // UINT64_MAX: out of the valid session-id range, so parsing must fail.
+  CreateSDPForBigOTests(offer, "18446744073709551615");
+
+  // The signaling state will remain "stable" because the unparsable
+  // SDP leads to a failure in SetRemoteDescription.
+  a2_->SetRemote(TestObserver::OFFER, offer, true,
+                 PCImplSignalingState::SignalingStable);
+  ASSERT_TRUE(a2_->pObserver->state == TestObserver::stateError);
+}
+
+// The next group exercises illegal signaling-state transitions; each one
+// asserts that the bad call fails with kInvalidState and leaves the state
+// machine where it was.
+// TODO: Move to jsep_session_unittest
+TEST_P(SignalingTest, SetLocalAnswerInStable)
+{
+  EnsureInit();
+
+  OfferOptions options;
+  CreateOffer(options, OFFER_AUDIO);
+
+  // The signaling state will remain "stable" because the
+  // SetLocalDescription call fails.
+  a1_->SetLocal(TestObserver::ANSWER, a1_->offer(), true,
+                PCImplSignalingState::SignalingStable);
+  ASSERT_EQ(a1_->pObserver->lastStatusCode,
+            PeerConnectionImpl::kInvalidState);
+}
+
+// TODO: Move to jsep_session_unittest
+TEST_P(SignalingTest, SetRemoteAnswerInStable) {
+  EnsureInit();
+
+  // The signaling state will remain "stable" because the
+  // SetRemoteDescription call fails.
+  a1_->SetRemote(TestObserver::ANSWER, strSampleSdpAudioVideoNoIce, true,
+                PCImplSignalingState::SignalingStable);
+  ASSERT_EQ(a1_->pObserver->lastStatusCode,
+            PeerConnectionImpl::kInvalidState);
+}
+
+// TODO: Move to jsep_session_unittest
+TEST_P(SignalingTest, SetLocalAnswerInHaveLocalOffer) {
+  OfferOptions options;
+  CreateOffer(options, OFFER_AUDIO);
+  a1_->SetLocal(TestObserver::OFFER, a1_->offer());
+  ASSERT_EQ(a1_->pObserver->lastStatusCode,
+            PeerConnectionImpl::kNoError);
+
+  // The signaling state will remain "have-local-offer" because the
+  // SetLocalDescription call fails.
+  a1_->SetLocal(TestObserver::ANSWER, a1_->offer(), true,
+                PCImplSignalingState::SignalingHaveLocalOffer);
+  ASSERT_EQ(a1_->pObserver->lastStatusCode,
+            PeerConnectionImpl::kInvalidState);
+}
+
+// TODO: Move to jsep_session_unittest
+TEST_P(SignalingTest, SetRemoteOfferInHaveLocalOffer) {
+  OfferOptions options;
+  CreateOffer(options, OFFER_AUDIO);
+  a1_->SetLocal(TestObserver::OFFER, a1_->offer());
+  ASSERT_EQ(a1_->pObserver->lastStatusCode,
+            PeerConnectionImpl::kNoError);
+
+  // The signaling state will remain "have-local-offer" because the
+  // SetRemoteDescription call fails.
+  a1_->SetRemote(TestObserver::OFFER, a1_->offer(), true,
+                 PCImplSignalingState::SignalingHaveLocalOffer);
+  ASSERT_EQ(a1_->pObserver->lastStatusCode,
+            PeerConnectionImpl::kInvalidState);
+}
+
+// TODO: Move to jsep_session_unittest
+TEST_P(SignalingTest, SetLocalOfferInHaveRemoteOffer) {
+  OfferOptions options;
+  CreateOffer(options, OFFER_AUDIO);
+  a2_->SetRemote(TestObserver::OFFER, a1_->offer());
+  ASSERT_EQ(a2_->pObserver->lastStatusCode,
+            PeerConnectionImpl::kNoError);
+
+  // The signaling state will remain "have-remote-offer" because the
+  // SetLocalDescription call fails.
+  a2_->SetLocal(TestObserver::OFFER, a1_->offer(), true,
+                PCImplSignalingState::SignalingHaveRemoteOffer);
+  ASSERT_EQ(a2_->pObserver->lastStatusCode,
+            PeerConnectionImpl::kInvalidState);
+}
+
+// TODO: Move to jsep_session_unittest
+TEST_P(SignalingTest, SetRemoteAnswerInHaveRemoteOffer) {
+  OfferOptions options;
+  CreateOffer(options, OFFER_AUDIO);
+  a2_->SetRemote(TestObserver::OFFER, a1_->offer());
+  ASSERT_EQ(a2_->pObserver->lastStatusCode,
+            PeerConnectionImpl::kNoError);
+
+  // The signaling state will remain "have-remote-offer" because the
+  // SetRemoteDescription call fails.
+  a2_->SetRemote(TestObserver::ANSWER, a1_->offer(), true,
+                PCImplSignalingState::SignalingHaveRemoteOffer);
+  ASSERT_EQ(a2_->pObserver->lastStatusCode,
+            PeerConnectionImpl::kInvalidState);
+}
+
+// Disabled until the spec adds a failure callback to addStream
+// Actually, this is allowed I think, it just triggers a negotiationneeded
+TEST_P(SignalingTest, DISABLED_AddStreamInHaveLocalOffer) {
+  OfferOptions options;
+  CreateOffer(options, OFFER_AUDIO);
+  a1_->SetLocal(TestObserver::OFFER, a1_->offer());
+  ASSERT_EQ(a1_->pObserver->lastStatusCode,
+            PeerConnectionImpl::kNoError);
+  a1_->AddStream();
+  ASSERT_EQ(a1_->pObserver->lastStatusCode,
+            PeerConnectionImpl::kInvalidState);
+}
+
+// Disabled until the spec adds a failure callback to removeStream
+// Actually, this is allowed I think, it just triggers a negotiationneeded
+TEST_P(SignalingTest, DISABLED_RemoveStreamInHaveLocalOffer) {
+  OfferOptions options;
+  CreateOffer(options, OFFER_AUDIO);
+  a1_->SetLocal(TestObserver::OFFER, a1_->offer());
+  ASSERT_EQ(a1_->pObserver->lastStatusCode,
+            PeerConnectionImpl::kNoError);
+  a1_->RemoveLastStreamAdded();
+  ASSERT_EQ(a1_->pObserver->lastStatusCode,
+            PeerConnectionImpl::kInvalidState);
+}
+
+TEST_P(SignalingTest, AddCandidateInHaveLocalOffer) {
+  OfferOptions options;
+  CreateOffer(options, OFFER_AUDIO);
+  a1_->SetLocal(TestObserver::OFFER, a1_->offer());
+  ASSERT_EQ(a1_->pObserver->lastAddIceStatusCode,
+            PeerConnectionImpl::kNoError);
+  a1_->AddIceCandidate(strSampleCandidate,
+                      strSampleMid, nSamplelevel, false);
+  ASSERT_EQ(PeerConnectionImpl::kInvalidState,
+            a1_->pObserver->lastAddIceStatusCode);
+}
+
+// SignalingAgentTest cases operate on standalone agents rather than the
+// paired a1_/a2_ fixture.
+TEST_F(SignalingAgentTest, CreateOffer) {
+  CreateAgent(TestStunServer::GetInstance()->addr(),
+              TestStunServer::GetInstance()->port());
+  OfferOptions options;
+  agent(0)->CreateOffer(options, OFFER_AUDIO);
+}
+
+TEST_F(SignalingAgentTest, SetLocalWithoutCreateOffer) {
+  CreateAgent(TestStunServer::GetInstance()->addr(),
+              TestStunServer::GetInstance()->port());
+  CreateAgent(TestStunServer::GetInstance()->addr(),
+              TestStunServer::GetInstance()->port());
+  OfferOptions options;
+  agent(0)->CreateOffer(options, OFFER_AUDIO);
+  // Setting agent 0's offer as agent 1's local description must fail.
+  agent(1)->SetLocal(TestObserver::OFFER,
+                     agent(0)->offer(),
+                     true,
+                     PCImplSignalingState::SignalingStable);
+}
+
+TEST_F(SignalingAgentTest, SetLocalWithoutCreateAnswer) {
+  CreateAgent(TestStunServer::GetInstance()->addr(),
+              TestStunServer::GetInstance()->port());
+  CreateAgent(TestStunServer::GetInstance()->addr(),
+              TestStunServer::GetInstance()->port());
+  CreateAgent(TestStunServer::GetInstance()->addr(),
+              TestStunServer::GetInstance()->port());
+  OfferOptions options;
+  agent(0)->CreateOffer(options, OFFER_AUDIO);
+  agent(1)->SetRemote(TestObserver::OFFER, agent(0)->offer());
+  agent(1)->CreateAnswer(ANSWER_AUDIO);
+  agent(2)->SetRemote(TestObserver::OFFER, agent(0)->offer());
+  // Use agent 1's answer on agent 2, should fail
+  agent(2)->SetLocal(TestObserver::ANSWER,
+                     agent(1)->answer(),
+                     true,
+                     PCImplSignalingState::SignalingHaveRemoteOffer);
+}
+
+TEST_F(SignalingAgentTest, CreateOfferSetLocalTrickleTestServer) {
+ TestStunServer::GetInstance()->SetActive(false);
+ TestStunServer::GetInstance()->SetResponseAddr(
+ kBogusSrflxAddress, kBogusSrflxPort);
+
+ CreateAgent(
+ TestStunServer::GetInstance()->addr(),
+ TestStunServer::GetInstance()->port());
+
+ OfferOptions options;
+ agent(0)->CreateOffer(options, OFFER_AUDIO);
+
+ // Verify that the bogus addr is not there.
+ ASSERT_FALSE(agent(0)->OfferContains(kBogusSrflxAddress));
+
+ // Now enable the STUN server.
+ TestStunServer::GetInstance()->SetActive(true);
+
+ agent(0)->SetLocal(TestObserver::OFFER, agent(0)->offer());
+ agent(0)->WaitForGather();
+
+ // Verify that we got our candidates.
+ ASSERT_LE(2U, agent(0)->MatchingCandidates(kBogusSrflxAddress));
+
+ // Verify that the candidates appear in the offer.
+ size_t match;
+ match = agent(0)->getLocalDescription().find(kBogusSrflxAddress);
+ ASSERT_LT(0U, match);
+}
+
+
+TEST_F(SignalingAgentTest, CreateAnswerSetLocalTrickleTestServer) {
+ TestStunServer::GetInstance()->SetActive(false);
+ TestStunServer::GetInstance()->SetResponseAddr(
+ kBogusSrflxAddress, kBogusSrflxPort);
+
+ CreateAgent(
+ TestStunServer::GetInstance()->addr(),
+ TestStunServer::GetInstance()->port());
+
+ std::string offer(strG711SdpOffer);
+ agent(0)->SetRemote(TestObserver::OFFER, offer, true,
+ PCImplSignalingState::SignalingHaveRemoteOffer);
+ ASSERT_EQ(agent(0)->pObserver->lastStatusCode,
+ PeerConnectionImpl::kNoError);
+
+ agent(0)->CreateAnswer(ANSWER_AUDIO);
+
+ // Verify that the bogus addr is not there.
+ ASSERT_FALSE(agent(0)->AnswerContains(kBogusSrflxAddress));
+
+ // Now enable the STUN server.
+ TestStunServer::GetInstance()->SetActive(true);
+
+ agent(0)->SetLocal(TestObserver::ANSWER, agent(0)->answer());
+ agent(0)->WaitForGather();
+
+ // Verify that we got our candidates.
+ ASSERT_LE(2U, agent(0)->MatchingCandidates(kBogusSrflxAddress));
+
+ // Verify that the candidates appear in the answer.
+ size_t match;
+ match = agent(0)->getLocalDescription().find(kBogusSrflxAddress);
+ ASSERT_LT(0U, match);
+}
+
+
+
+TEST_F(SignalingAgentTest, CreateLotsAndWait) {
+ int i;
+
+ for (i=0; i < 100; i++) {
+ if (!CreateAgent())
+ break;
+ std::cerr << "Created agent " << i << std::endl;
+ }
+ PR_Sleep(1000); // Wait to see if we crash
+}
+
+// Test for bug 856433.
+TEST_F(SignalingAgentTest, CreateNoInit) {
+ CreateAgentNoInit();
+}
+
+
+/*
+ * Test for Bug 843595
+ */
+TEST_P(SignalingTest, missingUfrag)
+{
+ EnsureInit();
+
+ OfferOptions options;
+ std::string offer =
+ "v=0\r\n"
+ "o=Mozilla-SIPUA 2208 0 IN IP4 0.0.0.0\r\n"
+ "s=SIP Call\r\n"
+ "t=0 0\r\n"
+ "a=ice-pwd:4450d5a4a5f097855c16fa079893be18\r\n"
+ "a=fingerprint:sha-256 23:9A:2E:43:94:42:CF:46:68:FC:62:F9:F4:48:61:DB:"
+ "2F:8C:C9:FF:6B:25:54:9D:41:09:EF:83:A8:19:FC:B6\r\n"
+ "m=audio 56187 UDP/TLS/RTP/SAVPF 109 0 8 101\r\n"
+ "c=IN IP4 77.9.79.167\r\n"
+ "a=rtpmap:109 opus/48000/2\r\n"
+ "a=ptime:20\r\n"
+ "a=rtpmap:0 PCMU/8000\r\n"
+ "a=rtpmap:8 PCMA/8000\r\n"
+ "a=rtpmap:101 telephone-event/8000\r\n"
+ "a=fmtp:101 0-15\r\n"
+ "a=sendrecv\r\n"
+ "a=candidate:0 1 UDP 2113601791 192.168.178.20 56187 typ host\r\n"
+ "a=candidate:1 1 UDP 1694236671 77.9.79.167 56187 typ srflx raddr "
+ "192.168.178.20 rport 56187\r\n"
+ "a=candidate:0 2 UDP 2113601790 192.168.178.20 52955 typ host\r\n"
+ "a=candidate:1 2 UDP 1694236670 77.9.79.167 52955 typ srflx raddr "
+ "192.168.178.20 rport 52955\r\n"
+ "m=video 49929 UDP/TLS/RTP/SAVPF 120\r\n"
+ "c=IN IP4 77.9.79.167\r\n"
+ "a=rtpmap:120 VP8/90000\r\n"
+ "a=recvonly\r\n"
+ "a=candidate:0 1 UDP 2113601791 192.168.178.20 49929 typ host\r\n"
+ "a=candidate:1 1 UDP 1694236671 77.9.79.167 49929 typ srflx raddr "
+ "192.168.178.20 rport 49929\r\n"
+ "a=candidate:0 2 UDP 2113601790 192.168.178.20 50769 typ host\r\n"
+ "a=candidate:1 2 UDP 1694236670 77.9.79.167 50769 typ srflx raddr "
+ "192.168.178.20 rport 50769\r\n"
+ "m=application 54054 DTLS/SCTP 5000\r\n"
+ "c=IN IP4 77.9.79.167\r\n"
+ "a=fmtp:HuRUu]Dtcl\\zM,7(OmEU%O$gU]x/z\tD protocol=webrtc-datachannel;"
+ "streams=16\r\n"
+ "a=sendrecv\r\n";
+
+ // Need to create an offer, since that's currently required by our
+ // FSM. This may change in the future.
+ a1_->CreateOffer(options, OFFER_AV);
+ a1_->SetLocal(TestObserver::OFFER, a1_->offer(), true);
+ // We now detect the missing ICE parameters at SetRemoteDescription
+ a2_->SetRemote(TestObserver::OFFER, offer, true,
+ PCImplSignalingState::SignalingStable);
+ ASSERT_TRUE(a2_->pObserver->state == TestObserver::stateError);
+}
+
+TEST_P(SignalingTest, AudioOnlyCalleeNoRtcpMux)
+{
+ EnsureInit();
+
+ OfferOptions options;
+
+ a1_->CreateOffer(options, OFFER_AUDIO);
+ a1_->SetLocal(TestObserver::OFFER, a1_->offer(), false);
+ ParsedSDP sdpWrapper(a1_->offer());
+ sdpWrapper.DeleteLine("a=rtcp-mux");
+ std::cout << "Modified SDP " << std::endl
+ << indent(sdpWrapper.getSdp()) << std::endl;
+ a2_->SetRemote(TestObserver::OFFER, sdpWrapper.getSdp(), false);
+ a2_->CreateAnswer(OFFER_AUDIO | ANSWER_AUDIO);
+ a2_->SetLocal(TestObserver::ANSWER, a2_->answer(), false);
+ a1_->SetRemote(TestObserver::ANSWER, a2_->answer(), false);
+
+ a1_->mExpectRtcpMuxAudio = false;
+ a2_->mExpectRtcpMuxAudio = false;
+
+ // Answer should not have a=rtcp-mux
+ ASSERT_EQ(a2_->getLocalDescription().find("\r\na=rtcp-mux"),
+ std::string::npos) << "SDP was: " << a2_->getLocalDescription();
+
+ WaitForCompleted();
+
+ CheckPipelines();
+ CheckStreams();
+
+ CloseStreams();
+}
+
+
+
+TEST_P(SignalingTest, AudioOnlyG722Only)
+{
+ EnsureInit();
+
+ OfferOptions options;
+
+ a1_->CreateOffer(options, OFFER_AUDIO);
+ a1_->SetLocal(TestObserver::OFFER, a1_->offer(), false);
+ ParsedSDP sdpWrapper(a1_->offer());
+ sdpWrapper.ReplaceLine("m=audio",
+ "m=audio 65375 UDP/TLS/RTP/SAVPF 9\r\n");
+ std::cout << "Modified SDP " << std::endl
+ << indent(sdpWrapper.getSdp()) << std::endl;
+ a2_->SetRemote(TestObserver::OFFER, sdpWrapper.getSdp(), false);
+ a2_->CreateAnswer(OFFER_AUDIO | ANSWER_AUDIO);
+ a2_->SetLocal(TestObserver::ANSWER, a2_->answer(), false);
+ a1_->SetRemote(TestObserver::ANSWER, a2_->answer(), false);
+ ASSERT_NE(a2_->getLocalDescription().find("UDP/TLS/RTP/SAVPF 9\r"),
+ std::string::npos);
+ ASSERT_NE(a2_->getLocalDescription().find("a=rtpmap:9 G722/8000"), std::string::npos);
+
+ WaitForCompleted();
+
+ CheckPipelines();
+ CheckStreams();
+
+ CloseStreams();
+}
+
+TEST_P(SignalingTest, AudioOnlyG722MostPreferred)
+{
+ EnsureInit();
+
+ OfferOptions options;
+
+ a1_->CreateOffer(options, OFFER_AUDIO);
+ a1_->SetLocal(TestObserver::OFFER, a1_->offer(), false);
+ ParsedSDP sdpWrapper(a1_->offer());
+ sdpWrapper.ReplaceLine("m=audio",
+ "m=audio 65375 UDP/TLS/RTP/SAVPF 9 0 8 109\r\n");
+ std::cout << "Modified SDP " << std::endl
+ << indent(sdpWrapper.getSdp()) << std::endl;
+ a2_->SetRemote(TestObserver::OFFER, sdpWrapper.getSdp(), false);
+ a2_->CreateAnswer(OFFER_AUDIO | ANSWER_AUDIO);
+ a2_->SetLocal(TestObserver::ANSWER, a2_->answer(), false);
+ a1_->SetRemote(TestObserver::ANSWER, a2_->answer(), false);
+ ASSERT_NE(a2_->getLocalDescription().find("UDP/TLS/RTP/SAVPF 9"),
+ std::string::npos);
+ ASSERT_NE(a2_->getLocalDescription().find("a=rtpmap:9 G722/8000"), std::string::npos);
+
+ CheckPipelines();
+ CheckStreams();
+
+ CloseStreams();
+}
+
+TEST_P(SignalingTest, AudioOnlyG722Rejected)
+{
+ EnsureInit();
+
+ OfferOptions options;
+
+ a1_->CreateOffer(options, OFFER_AUDIO);
+  // Create a modified SDP as a workaround for rejecting codecs;
+  // this way the answerer should pick a codec with lower priority.
+ a1_->SetLocal(TestObserver::OFFER, a1_->offer(), false);
+ ParsedSDP sdpWrapper(a1_->offer());
+ sdpWrapper.ReplaceLine("m=audio",
+ "m=audio 65375 UDP/TLS/RTP/SAVPF 0 8\r\n");
+ std::cout << "Modified SDP offer " << std::endl
+ << indent(sdpWrapper.getSdp()) << std::endl;
+ a2_->SetRemote(TestObserver::OFFER, sdpWrapper.getSdp(), false);
+ a2_->CreateAnswer(OFFER_AUDIO | ANSWER_AUDIO);
+ a2_->SetLocal(TestObserver::ANSWER, a2_->answer(), false);
+ a1_->SetRemote(TestObserver::ANSWER, a2_->answer(), false);
+ // TODO(bug 814227): Use commented out code instead.
+ ASSERT_NE(a2_->getLocalDescription().find("UDP/TLS/RTP/SAVPF 0\r"),
+ std::string::npos);
+ // ASSERT_NE(a2_->getLocalDescription().find("UDP/TLS/RTP/SAVPF 0 8\r"), std::string::npos);
+ ASSERT_NE(a2_->getLocalDescription().find("a=rtpmap:0 PCMU/8000"), std::string::npos);
+ ASSERT_EQ(a2_->getLocalDescription().find("a=rtpmap:109 opus/48000/2"), std::string::npos);
+ ASSERT_EQ(a2_->getLocalDescription().find("a=rtpmap:9 G722/8000"), std::string::npos);
+
+ CheckPipelines();
+ CheckStreams();
+
+ CloseStreams();
+}
+
+TEST_P(SignalingTest, RestartIce)
+{
+ OfferOptions options;
+ OfferAnswer(options, OFFER_AV | ANSWER_AV);
+
+ options.setBoolOption("IceRestart", true);
+ OfferAnswer(options, OFFER_NONE);
+
+ CloseStreams();
+}
+
+TEST_P(SignalingTest, FullCallAudioNoMuxVideoMux)
+{
+ if (UseBundle()) {
+ // This test doesn't make sense for bundle
+ return;
+ }
+
+ EnsureInit();
+
+ OfferOptions options;
+
+ a1_->CreateOffer(options, OFFER_AV);
+ a1_->SetLocal(TestObserver::OFFER, a1_->offer(), false);
+ ParsedSDP sdpWrapper(a1_->offer());
+ sdpWrapper.DeleteLine("a=rtcp-mux");
+ std::cout << "Modified SDP " << std::endl
+ << indent(sdpWrapper.getSdp()) << std::endl;
+ a2_->SetRemote(TestObserver::OFFER, sdpWrapper.getSdp(), false);
+ a2_->CreateAnswer(OFFER_AV | ANSWER_AV);
+ a2_->SetLocal(TestObserver::ANSWER, a2_->answer(), false);
+ a1_->SetRemote(TestObserver::ANSWER, a2_->answer(), false);
+
+ // Answer should have only one a=rtcp-mux line
+ size_t match = a2_->getLocalDescription().find("\r\na=rtcp-mux");
+ ASSERT_NE(match, std::string::npos);
+ match = a2_->getLocalDescription().find("\r\na=rtcp-mux", match + 1);
+ ASSERT_EQ(match, std::string::npos);
+
+ a1_->mExpectRtcpMuxAudio = false;
+ a2_->mExpectRtcpMuxAudio = false;
+
+ WaitForCompleted();
+ CheckPipelines();
+ CheckStreams();
+
+ CloseStreams();
+}
+
+// TODO: Move to jsep_session_unittest
+TEST_P(SignalingTest, RtcpFbInOffer)
+{
+ EnsureInit();
+ OfferOptions options;
+ a1_->CreateOffer(options, OFFER_AV);
+ const char *expected[] = { "nack", "nack pli", "ccm fir" };
+ CheckRtcpFbSdp(a1_->offer(), ARRAY_TO_SET(std::string, expected));
+}
+
+TEST_P(SignalingTest, RtcpFbOfferAll)
+{
+ const char *feedbackTypes[] = { "nack", "nack pli", "ccm fir" };
+ TestRtcpFbOffer(ARRAY_TO_SET(std::string, feedbackTypes),
+ true,
+ VideoSessionConduit::FrameRequestPli);
+}
+
+TEST_P(SignalingTest, RtcpFbOfferNoNackBasic)
+{
+ const char *feedbackTypes[] = { "nack pli", "ccm fir" };
+ TestRtcpFbOffer(ARRAY_TO_SET(std::string, feedbackTypes),
+ false,
+ VideoSessionConduit::FrameRequestPli);
+}
+
+TEST_P(SignalingTest, RtcpFbOfferNoNackPli)
+{
+ const char *feedbackTypes[] = { "nack", "ccm fir" };
+ TestRtcpFbOffer(ARRAY_TO_SET(std::string, feedbackTypes),
+ true,
+ VideoSessionConduit::FrameRequestFir);
+}
+
+TEST_P(SignalingTest, RtcpFbOfferNoCcmFir)
+{
+ const char *feedbackTypes[] = { "nack", "nack pli" };
+ TestRtcpFbOffer(ARRAY_TO_SET(std::string, feedbackTypes),
+ true,
+ VideoSessionConduit::FrameRequestPli);
+}
+
+TEST_P(SignalingTest, RtcpFbOfferNoNack)
+{
+ const char *feedbackTypes[] = { "ccm fir" };
+ TestRtcpFbOffer(ARRAY_TO_SET(std::string, feedbackTypes),
+ false,
+ VideoSessionConduit::FrameRequestFir);
+}
+
+TEST_P(SignalingTest, RtcpFbOfferNoFrameRequest)
+{
+ const char *feedbackTypes[] = { "nack" };
+ TestRtcpFbOffer(ARRAY_TO_SET(std::string, feedbackTypes),
+ true,
+ VideoSessionConduit::FrameRequestNone);
+}
+
+TEST_P(SignalingTest, RtcpFbOfferPliOnly)
+{
+ const char *feedbackTypes[] = { "nack pli" };
+ TestRtcpFbOffer(ARRAY_TO_SET(std::string, feedbackTypes),
+ false,
+ VideoSessionConduit::FrameRequestPli);
+}
+
+TEST_P(SignalingTest, RtcpFbOfferNoFeedback)
+{
+ const char *feedbackTypes[] = { };
+ TestRtcpFbOffer(ARRAY_TO_SET(std::string, feedbackTypes),
+ false,
+ VideoSessionConduit::FrameRequestNone);
+}
+
+TEST_P(SignalingTest, RtcpFbAnswerAll)
+{
+ const char *feedbackTypes[] = { "nack", "nack pli", "ccm fir" };
+ TestRtcpFbAnswer(ARRAY_TO_SET(std::string, feedbackTypes),
+ true,
+ VideoSessionConduit::FrameRequestPli);
+}
+
+TEST_P(SignalingTest, RtcpFbAnswerNoNackBasic)
+{
+ const char *feedbackTypes[] = { "nack pli", "ccm fir" };
+ TestRtcpFbAnswer(ARRAY_TO_SET(std::string, feedbackTypes),
+ false,
+ VideoSessionConduit::FrameRequestPli);
+}
+
+TEST_P(SignalingTest, RtcpFbAnswerNoNackPli)
+{
+ const char *feedbackTypes[] = { "nack", "ccm fir" };
+ TestRtcpFbAnswer(ARRAY_TO_SET(std::string, feedbackTypes),
+ true,
+ VideoSessionConduit::FrameRequestFir);
+}
+
+TEST_P(SignalingTest, RtcpFbAnswerNoCcmFir)
+{
+ const char *feedbackTypes[] = { "nack", "nack pli" };
+ TestRtcpFbAnswer(ARRAY_TO_SET(std::string, feedbackTypes),
+ true,
+ VideoSessionConduit::FrameRequestPli);
+}
+
+TEST_P(SignalingTest, RtcpFbAnswerNoNack)
+{
+ const char *feedbackTypes[] = { "ccm fir" };
+ TestRtcpFbAnswer(ARRAY_TO_SET(std::string, feedbackTypes),
+ false,
+ VideoSessionConduit::FrameRequestFir);
+}
+
+TEST_P(SignalingTest, RtcpFbAnswerNoFrameRequest)
+{
+ const char *feedbackTypes[] = { "nack" };
+ TestRtcpFbAnswer(ARRAY_TO_SET(std::string, feedbackTypes),
+ true,
+ VideoSessionConduit::FrameRequestNone);
+}
+
+TEST_P(SignalingTest, RtcpFbAnswerPliOnly)
+{
+ const char *feedbackTypes[] = { "nack pli" };
+ TestRtcpFbAnswer(ARRAY_TO_SET(std::string, feedbackTypes),
+ 0,
+ VideoSessionConduit::FrameRequestPli);
+}
+
+TEST_P(SignalingTest, RtcpFbAnswerNoFeedback)
+{
+ const char *feedbackTypes[] = { };
+ TestRtcpFbAnswer(ARRAY_TO_SET(std::string, feedbackTypes),
+ 0,
+ VideoSessionConduit::FrameRequestNone);
+}
+
+// In this test we will change the offer SDP's a=setup value
+// from actpass to passive. This will make the answer do active.
+TEST_P(SignalingTest, AudioCallForceDtlsRoles)
+{
+ EnsureInit();
+
+ OfferOptions options;
+ size_t match;
+
+ a1_->CreateOffer(options, OFFER_AUDIO);
+
+ // By default the offer should give actpass
+ std::string offer(a1_->offer());
+ match = offer.find("\r\na=setup:actpass");
+ ASSERT_NE(match, std::string::npos);
+ // Now replace the actpass with passive so that the answer will
+ // return active
+ offer.replace(match, strlen("\r\na=setup:actpass"),
+ "\r\na=setup:passive");
+ std::cout << "Modified SDP " << std::endl
+ << indent(offer) << std::endl;
+
+ a1_->SetLocal(TestObserver::OFFER, offer, false);
+ a2_->SetRemote(TestObserver::OFFER, offer, false);
+ a2_->CreateAnswer(OFFER_AUDIO | ANSWER_AUDIO);
+
+ // Now the answer should contain a=setup:active
+ std::string answer(a2_->answer());
+ match = answer.find("\r\na=setup:active");
+ ASSERT_NE(match, std::string::npos);
+
+ // This should setup the DTLS with the same roles
+ // as the regular tests above.
+ a2_->SetLocal(TestObserver::ANSWER, a2_->answer(), false);
+ a1_->SetRemote(TestObserver::ANSWER, a2_->answer(), false);
+
+ WaitForCompleted();
+
+ CheckPipelines();
+ CheckStreams();
+
+ CloseStreams();
+}
+
+// In this test we will change the offer SDP's a=setup value
+// from actpass to active. This will make the answer do passive
+TEST_P(SignalingTest, AudioCallReverseDtlsRoles)
+{
+ EnsureInit();
+
+ OfferOptions options;
+ size_t match;
+
+ a1_->CreateOffer(options, OFFER_AUDIO);
+
+ // By default the offer should give actpass
+ std::string offer(a1_->offer());
+ match = offer.find("\r\na=setup:actpass");
+ ASSERT_NE(match, std::string::npos);
+ // Now replace the actpass with active so that the answer will
+ // return passive
+ offer.replace(match, strlen("\r\na=setup:actpass"),
+ "\r\na=setup:active");
+ std::cout << "Modified SDP " << std::endl
+ << indent(offer) << std::endl;
+
+ a1_->SetLocal(TestObserver::OFFER, offer, false);
+ a2_->SetRemote(TestObserver::OFFER, offer, false);
+ a2_->CreateAnswer(OFFER_AUDIO | ANSWER_AUDIO);
+
+ // Now the answer should contain a=setup:passive
+ std::string answer(a2_->answer());
+ match = answer.find("\r\na=setup:passive");
+ ASSERT_NE(match, std::string::npos);
+
+ // This should setup the DTLS with the opposite roles
+ // than the regular tests above.
+ a2_->SetLocal(TestObserver::ANSWER, a2_->answer(), false);
+ a1_->SetRemote(TestObserver::ANSWER, a2_->answer(), false);
+
+ WaitForCompleted();
+
+ CheckPipelines();
+ CheckStreams();
+
+ CloseStreams();
+}
+
+// In this test we will change the answer SDP's a=setup value
+// from active to passive. This will make both sides do
+// active and should not connect.
+TEST_P(SignalingTest, AudioCallMismatchDtlsRoles)
+{
+ EnsureInit();
+
+ OfferOptions options;
+ size_t match;
+
+ a1_->CreateOffer(options, OFFER_AUDIO);
+
+ // By default the offer should give actpass
+ std::string offer(a1_->offer());
+ match = offer.find("\r\na=setup:actpass");
+ ASSERT_NE(match, std::string::npos);
+ a1_->SetLocal(TestObserver::OFFER, offer, false);
+ a2_->SetRemote(TestObserver::OFFER, offer, false);
+ a2_->CreateAnswer(OFFER_AUDIO | ANSWER_AUDIO);
+
+ // Now the answer should contain a=setup:active
+ std::string answer(a2_->answer());
+ match = answer.find("\r\na=setup:active");
+ ASSERT_NE(match, std::string::npos);
+ a2_->SetLocal(TestObserver::ANSWER, answer, false);
+
+ // Now replace the active with passive so that the offerer will
+ // also do active.
+ answer.replace(match, strlen("\r\na=setup:active"),
+ "\r\na=setup:passive");
+ std::cout << "Modified SDP " << std::endl
+ << indent(answer) << std::endl;
+
+ // This should setup the DTLS with both sides playing active
+ a1_->SetRemote(TestObserver::ANSWER, answer, false);
+
+ WaitForCompleted();
+
+ // Not using ASSERT_TRUE_WAIT here because we expect failure
+ PR_Sleep(500); // Wait for some data to get written
+
+ CloseStreams();
+
+ ASSERT_GE(a1_->GetPacketsSent(0), 4);
+ // In this case we should receive nothing.
+ ASSERT_EQ(a2_->GetPacketsReceived(0), 0);
+}
+
+// In this test we will change the offer SDP's a=setup value
+// from actpass to garbage. It should ignore the garbage value
+// and respond with setup:active
+TEST_P(SignalingTest, AudioCallGarbageSetup)
+{
+ EnsureInit();
+
+ OfferOptions options;
+ size_t match;
+
+ a1_->CreateOffer(options, OFFER_AUDIO);
+
+ // By default the offer should give actpass
+ std::string offer(a1_->offer());
+ match = offer.find("\r\na=setup:actpass");
+ ASSERT_NE(match, std::string::npos);
+ // Now replace the actpass with a garbage value
+ offer.replace(match, strlen("\r\na=setup:actpass"),
+ "\r\na=setup:G4rb4g3V4lu3");
+ std::cout << "Modified SDP " << std::endl
+ << indent(offer) << std::endl;
+
+ a1_->SetLocal(TestObserver::OFFER, offer, false);
+ a2_->SetRemote(TestObserver::OFFER, offer, false);
+ a2_->CreateAnswer(OFFER_AUDIO | ANSWER_AUDIO);
+
+ // Now the answer should contain a=setup:active
+ std::string answer(a2_->answer());
+ match = answer.find("\r\na=setup:active");
+ ASSERT_NE(match, std::string::npos);
+
+ // This should setup the DTLS with the same roles
+ // as the regular tests above.
+ a2_->SetLocal(TestObserver::ANSWER, a2_->answer(), false);
+ a1_->SetRemote(TestObserver::ANSWER, a2_->answer(), false);
+
+ WaitForCompleted();
+
+ CheckPipelines();
+ CheckStreams();
+
+ CloseStreams();
+}
+
+// In this test we will change the offer SDP to remove the
+// a=setup line. Answer should respond with a=setup:active.
+TEST_P(SignalingTest, AudioCallOfferNoSetupOrConnection)
+{
+ EnsureInit();
+
+ OfferOptions options;
+ size_t match;
+
+ a1_->CreateOffer(options, OFFER_AUDIO);
+
+ std::string offer(a1_->offer());
+ a1_->SetLocal(TestObserver::OFFER, offer, false);
+
+ // By default the offer should give setup:actpass
+ match = offer.find("\r\na=setup:actpass");
+ ASSERT_NE(match, std::string::npos);
+ // Remove the a=setup line
+ offer.replace(match, strlen("\r\na=setup:actpass"), "");
+ std::cout << "Modified SDP " << std::endl
+ << indent(offer) << std::endl;
+
+ a2_->SetRemote(TestObserver::OFFER, offer, false);
+ a2_->CreateAnswer(OFFER_AUDIO | ANSWER_AUDIO);
+
+ // Now the answer should contain a=setup:active
+ std::string answer(a2_->answer());
+ match = answer.find("\r\na=setup:active");
+ ASSERT_NE(match, std::string::npos);
+
+ // This should setup the DTLS with the same roles
+ // as the regular tests above.
+ a2_->SetLocal(TestObserver::ANSWER, a2_->answer(), false);
+ a1_->SetRemote(TestObserver::ANSWER, a2_->answer(), false);
+
+ WaitForCompleted();
+
+ CheckPipelines();
+ CheckStreams();
+
+ CloseStreams();
+}
+
+// In this test we will change the answer SDP to remove the
+// a=setup line. ICE should still connect since active will
+// be assumed.
+TEST_P(SignalingTest, AudioCallAnswerNoSetupOrConnection)
+{
+ EnsureInit();
+
+ OfferOptions options;
+ size_t match;
+
+ a1_->CreateOffer(options, OFFER_AUDIO);
+
+ // By default the offer should give setup:actpass
+ std::string offer(a1_->offer());
+ match = offer.find("\r\na=setup:actpass");
+ ASSERT_NE(match, std::string::npos);
+
+ a1_->SetLocal(TestObserver::OFFER, offer, false);
+ a2_->SetRemote(TestObserver::OFFER, offer, false);
+ a2_->CreateAnswer(OFFER_AUDIO | ANSWER_AUDIO);
+
+ // Now the answer should contain a=setup:active
+ std::string answer(a2_->answer());
+ match = answer.find("\r\na=setup:active");
+ ASSERT_NE(match, std::string::npos);
+ // Remove the a=setup line
+ answer.replace(match, strlen("\r\na=setup:active"), "");
+ std::cout << "Modified SDP " << std::endl
+ << indent(answer) << std::endl;
+
+ // This should setup the DTLS with the same roles
+ // as the regular tests above.
+ a2_->SetLocal(TestObserver::ANSWER, answer, false);
+ a1_->SetRemote(TestObserver::ANSWER, answer, false);
+
+ WaitForCompleted();
+
+ CheckPipelines();
+ CheckStreams();
+
+ CloseStreams();
+}
+
+
+TEST_P(SignalingTest, FullCallRealTrickle)
+{
+ OfferOptions options;
+ OfferAnswer(options, OFFER_AV | ANSWER_AV);
+ CloseStreams();
+}
+
+TEST_P(SignalingTest, FullCallRealTrickleTestServer)
+{
+ SetTestStunServer();
+
+ OfferOptions options;
+ OfferAnswer(options, OFFER_AV | ANSWER_AV);
+
+ TestStunServer::GetInstance()->SetActive(true);
+
+ CloseStreams();
+}
+
+TEST_P(SignalingTest, hugeSdp)
+{
+ EnsureInit();
+
+ OfferOptions options;
+ std::string offer =
+ "v=0\r\n"
+ "o=- 1109973417102828257 2 IN IP4 127.0.0.1\r\n"
+ "s=-\r\n"
+ "t=0 0\r\n"
+ "a=group:BUNDLE audio video\r\n"
+ "a=msid-semantic: WMS 1PBxet5BYh0oYodwsvNM4k6KiO2eWCX40VIP\r\n"
+ "m=audio 32952 UDP/TLS/RTP/SAVPF 111 103 104 0 8 107 106 105 13 126\r\n"
+ "c=IN IP4 128.64.32.16\r\n"
+ "a=rtcp:32952 IN IP4 128.64.32.16\r\n"
+ "a=candidate:77142221 1 udp 2113937151 192.168.137.1 54081 typ host generation 0\r\n"
+ "a=candidate:77142221 2 udp 2113937151 192.168.137.1 54081 typ host generation 0\r\n"
+ "a=candidate:983072742 1 udp 2113937151 172.22.0.56 54082 typ host generation 0\r\n"
+ "a=candidate:983072742 2 udp 2113937151 172.22.0.56 54082 typ host generation 0\r\n"
+ "a=candidate:2245074553 1 udp 1845501695 32.64.128.1 62397 typ srflx raddr 192.168.137.1 rport 54081 generation 0\r\n"
+ "a=candidate:2245074553 2 udp 1845501695 32.64.128.1 62397 typ srflx raddr 192.168.137.1 rport 54081 generation 0\r\n"
+ "a=candidate:2479353907 1 udp 1845501695 32.64.128.1 54082 typ srflx raddr 172.22.0.56 rport 54082 generation 0\r\n"
+ "a=candidate:2479353907 2 udp 1845501695 32.64.128.1 54082 typ srflx raddr 172.22.0.56 rport 54082 generation 0\r\n"
+ "a=candidate:1243276349 1 tcp 1509957375 192.168.137.1 0 typ host generation 0\r\n"
+ "a=candidate:1243276349 2 tcp 1509957375 192.168.137.1 0 typ host generation 0\r\n"
+ "a=candidate:1947960086 1 tcp 1509957375 172.22.0.56 0 typ host generation 0\r\n"
+ "a=candidate:1947960086 2 tcp 1509957375 172.22.0.56 0 typ host generation 0\r\n"
+ "a=candidate:1808221584 1 udp 33562367 128.64.32.16 32952 typ relay raddr 32.64.128.1 rport 62398 generation 0\r\n"
+ "a=candidate:1808221584 2 udp 33562367 128.64.32.16 32952 typ relay raddr 32.64.128.1 rport 62398 generation 0\r\n"
+ "a=candidate:507872740 1 udp 33562367 128.64.32.16 40975 typ relay raddr 32.64.128.1 rport 54085 generation 0\r\n"
+ "a=candidate:507872740 2 udp 33562367 128.64.32.16 40975 typ relay raddr 32.64.128.1 rport 54085 generation 0\r\n"
+ "a=ice-ufrag:xQuJwjX3V3eMA81k\r\n"
+ "a=ice-pwd:ZUiRmjS2GDhG140p73dAsSVP\r\n"
+ "a=ice-options:google-ice\r\n"
+ "a=fingerprint:sha-256 59:4A:8B:73:A7:73:53:71:88:D7:4D:58:28:0C:79:72:31:29:9B:05:37:DD:58:43:C2:D4:85:A2:B3:66:38:7A\r\n"
+ "a=setup:active\r\n"
+ "a=extmap:1 urn:ietf:params:rtp-hdrext:ssrc-audio-level\r\n"
+ "a=sendrecv\r\n"
+ "a=mid:audio\r\n"
+ "a=rtcp-mux\r\n"
+ "a=crypto:1 AES_CM_128_HMAC_SHA1_80 inline:/U44g3ULdtapeiSg+T3n6dDLBKIjpOhb/NXAL/2b\r\n"
+ "a=rtpmap:111 opus/48000/2\r\n"
+ "a=fmtp:111 minptime=10\r\n"
+ "a=rtpmap:103 ISAC/16000\r\n"
+ "a=rtpmap:104 ISAC/32000\r\n"
+ "a=rtpmap:0 PCMU/8000\r\n"
+ "a=rtpmap:8 PCMA/8000\r\n"
+ "a=rtpmap:107 CN/48000\r\n"
+ "a=rtpmap:106 CN/32000\r\n"
+ "a=rtpmap:105 CN/16000\r\n"
+ "a=rtpmap:13 CN/8000\r\n"
+ "a=rtpmap:126 telephone-event/8000\r\n"
+ "a=maxptime:60\r\n"
+ "a=ssrc:2271517329 cname:mKDNt7SQf6pwDlIn\r\n"
+ "a=ssrc:2271517329 msid:1PBxet5BYh0oYodwsvNM4k6KiO2eWCX40VIP 1PBxet5BYh0oYodwsvNM4k6KiO2eWCX40VIPa0\r\n"
+ "a=ssrc:2271517329 mslabel:1PBxet5BYh0oYodwsvNM4k6KiO2eWCX40VIP\r\n"
+ "a=ssrc:2271517329 label:1PBxet5BYh0oYodwsvNM4k6KiO2eWCX40VIPa0\r\n"
+ "m=video 32952 UDP/TLS/RTP/SAVPF 100 116 117\r\n"
+ "c=IN IP4 128.64.32.16\r\n"
+ "a=rtcp:32952 IN IP4 128.64.32.16\r\n"
+ "a=candidate:77142221 1 udp 2113937151 192.168.137.1 54081 typ host generation 0\r\n"
+ "a=candidate:77142221 2 udp 2113937151 192.168.137.1 54081 typ host generation 0\r\n"
+ "a=candidate:983072742 1 udp 2113937151 172.22.0.56 54082 typ host generation 0\r\n"
+ "a=candidate:983072742 2 udp 2113937151 172.22.0.56 54082 typ host generation 0\r\n"
+ "a=candidate:2245074553 1 udp 1845501695 32.64.128.1 62397 typ srflx raddr 192.168.137.1 rport 54081 generation 0\r\n"
+ "a=candidate:2245074553 2 udp 1845501695 32.64.128.1 62397 typ srflx raddr 192.168.137.1 rport 54081 generation 0\r\n"
+ "a=candidate:2479353907 1 udp 1845501695 32.64.128.1 54082 typ srflx raddr 172.22.0.56 rport 54082 generation 0\r\n"
+ "a=candidate:2479353907 2 udp 1845501695 32.64.128.1 54082 typ srflx raddr 172.22.0.56 rport 54082 generation 0\r\n"
+ "a=candidate:1243276349 1 tcp 1509957375 192.168.137.1 0 typ host generation 0\r\n"
+ "a=candidate:1243276349 2 tcp 1509957375 192.168.137.1 0 typ host generation 0\r\n"
+ "a=candidate:1947960086 1 tcp 1509957375 172.22.0.56 0 typ host generation 0\r\n"
+ "a=candidate:1947960086 2 tcp 1509957375 172.22.0.56 0 typ host generation 0\r\n"
+ "a=candidate:1808221584 1 udp 33562367 128.64.32.16 32952 typ relay raddr 32.64.128.1 rport 62398 generation 0\r\n"
+ "a=candidate:1808221584 2 udp 33562367 128.64.32.16 32952 typ relay raddr 32.64.128.1 rport 62398 generation 0\r\n"
+ "a=candidate:507872740 1 udp 33562367 128.64.32.16 40975 typ relay raddr 32.64.128.1 rport 54085 generation 0\r\n"
+ "a=candidate:507872740 2 udp 33562367 128.64.32.16 40975 typ relay raddr 32.64.128.1 rport 54085 generation 0\r\n"
+ "a=ice-ufrag:xQuJwjX3V3eMA81k\r\n"
+ "a=ice-pwd:ZUiRmjS2GDhG140p73dAsSVP\r\n"
+ "a=ice-options:google-ice\r\n"
+ "a=fingerprint:sha-256 59:4A:8B:73:A7:73:53:71:88:D7:4D:58:28:0C:79:72:31:29:9B:05:37:DD:58:43:C2:D4:85:A2:B3:66:38:7A\r\n"
+ "a=setup:active\r\n"
+ "a=extmap:2 urn:ietf:params:rtp-hdrext:toffset\r\n"
+ "a=extmap:3 http://www.webrtc.org/experiments/rtp-hdrext/abs-send-time\r\n"
+ "a=sendrecv\r\n"
+ "a=mid:video\r\n"
+ "a=rtcp-mux\r\n"
+ "a=crypto:1 AES_CM_128_HMAC_SHA1_80 inline:/U44g3ULdtapeiSg+T3n6dDLBKIjpOhb/NXAL/2b\r\n"
+ "a=rtpmap:100 VP8/90000\r\n"
+ "a=rtcp-fb:100 ccm fir\r\n"
+ "a=rtcp-fb:100 nack\r\n"
+ "a=rtcp-fb:100 goog-remb\r\n"
+ "a=rtpmap:116 red/90000\r\n"
+ "a=rtpmap:117 ulpfec/90000\r\n"
+ "a=ssrc:54724160 cname:mKDNt7SQf6pwDlIn\r\n"
+ "a=ssrc:54724160 msid:1PBxet5BYh0oYodwsvNM4k6KiO2eWCX40VIP 1PBxet5BYh0oYodwsvNM4k6KiO2eWCX40VIPv0\r\n"
+ "a=ssrc:54724160 mslabel:1PBxet5BYh0oYodwsvNM4k6KiO2eWCX40VIP\r\n"
+ "a=ssrc:54724160 label:1PBxet5BYh0oYodwsvNM4k6KiO2eWCX40VIPv0\r\n";
+
+ a1_->CreateOffer(options, OFFER_AV);
+ a1_->SetLocal(TestObserver::OFFER, a1_->offer(), true);
+
+ a2_->SetRemote(TestObserver::OFFER, offer, true);
+ ASSERT_GE(a2_->getRemoteDescription().length(), 4096U);
+ a2_->CreateAnswer(OFFER_AV);
+}
+
+// Test max_fs and max_fr prefs have proper impact on SDP offer
+TEST_P(SignalingTest, MaxFsFrInOffer)
+{
+ EnsureInit();
+
+ OfferOptions options;
+
+ nsCOMPtr<nsIPrefBranch> prefs = do_GetService(NS_PREFSERVICE_CONTRACTID);
+ ASSERT_TRUE(prefs);
+ FsFrPrefClearer prefClearer(prefs);
+
+ SetMaxFsFr(prefs, 300, 30);
+
+ a1_->CreateOffer(options, OFFER_AV);
+
+ // Verify that SDP contains correct max-fs and max-fr
+ CheckMaxFsFrSdp(a1_->offer(), 120, 300, 30);
+}
+
+// Test max_fs and max_fr prefs have proper impact on SDP answer
+TEST_P(SignalingTest, MaxFsFrInAnswer)
+{
+ EnsureInit();
+
+ OfferOptions options;
+
+ nsCOMPtr<nsIPrefBranch> prefs = do_GetService(NS_PREFSERVICE_CONTRACTID);
+ ASSERT_TRUE(prefs);
+ FsFrPrefClearer prefClearer(prefs);
+
+ a1_->CreateOffer(options, OFFER_AV);
+
+ SetMaxFsFr(prefs, 600, 60);
+
+ a2_->SetRemote(TestObserver::OFFER, a1_->offer());
+
+ a2_->CreateAnswer(OFFER_AV | ANSWER_AV);
+
+ // Verify that SDP contains correct max-fs and max-fr
+ CheckMaxFsFrSdp(a2_->answer(), 120, 600, 60);
+}
+
+// Test SDP offer has proper impact on callee's codec configuration
+TEST_P(SignalingTest, MaxFsFrCalleeCodec)
+{
+ EnsureInit();
+
+ OfferOptions options;
+
+ nsCOMPtr<nsIPrefBranch> prefs = do_GetService(NS_PREFSERVICE_CONTRACTID);
+ ASSERT_TRUE(prefs);
+ FsFrPrefClearer prefClearer(prefs);
+
+ SetMaxFsFr(prefs, 300, 30);
+ a1_->CreateOffer(options, OFFER_AV);
+
+ CheckMaxFsFrSdp(a1_->offer(), 120, 300, 30);
+
+ a1_->SetLocal(TestObserver::OFFER, a1_->offer());
+
+ SetMaxFsFr(prefs, 3601, 31);
+ a2_->SetRemote(TestObserver::OFFER, a1_->offer());
+
+ a2_->CreateAnswer(OFFER_AV | ANSWER_AV);
+
+ CheckMaxFsFrSdp(a2_->answer(), 120, 3601, 31);
+
+ a2_->SetLocal(TestObserver::ANSWER, a2_->answer());
+ a1_->SetRemote(TestObserver::ANSWER, a2_->answer());
+
+ WaitForCompleted();
+
+ CheckPipelines();
+ CheckStreams();
+
+  // Check that the callee's video sending configuration respects the
+  // max-fs and max-fr values from the SDP offer.
+ RefPtr<mozilla::MediaPipeline> pipeline =
+ a2_->GetMediaPipeline(1, 0, 1);
+ ASSERT_TRUE(pipeline);
+ mozilla::MediaSessionConduit *conduit = pipeline->Conduit();
+ ASSERT_TRUE(conduit);
+ ASSERT_EQ(conduit->type(), mozilla::MediaSessionConduit::VIDEO);
+ mozilla::VideoSessionConduit *video_conduit =
+ static_cast<mozilla::VideoSessionConduit*>(conduit);
+
+ ASSERT_EQ(video_conduit->SendingMaxFs(), (unsigned short) 300);
+ ASSERT_EQ(video_conduit->SendingMaxFr(), (unsigned short) 30);
+}
+
+// Test SDP answer has proper impact on caller's codec configuration
+TEST_P(SignalingTest, MaxFsFrCallerCodec)
+{
+ EnsureInit();
+
+ OfferOptions options;
+
+ nsCOMPtr<nsIPrefBranch> prefs = do_GetService(NS_PREFSERVICE_CONTRACTID);
+ ASSERT_TRUE(prefs);
+ // Guard object; presumably restores the max-fs/max-fr prefs on scope exit.
+ FsFrPrefClearer prefClearer(prefs);
+
+ a1_->CreateOffer(options, OFFER_AV);
+ a1_->SetLocal(TestObserver::OFFER, a1_->offer());
+
+ // Only the answerer sets max-fs/max-fr prefs; the caller must pick the
+ // constraints up from the answer SDP.
+ SetMaxFsFr(prefs, 600, 60);
+ a2_->SetRemote(TestObserver::OFFER, a1_->offer());
+
+ a2_->CreateAnswer(OFFER_AV | ANSWER_AV);
+
+ // Double confirm that SDP answer contains correct max-fs and max-fr
+ CheckMaxFsFrSdp(a2_->answer(), 120, 600, 60);
+
+ a2_->SetLocal(TestObserver::ANSWER, a2_->answer());
+ a1_->SetRemote(TestObserver::ANSWER, a2_->answer());
+
+ WaitForCompleted();
+
+ CheckPipelines();
+ CheckStreams();
+
+ // Checking caller's video sending configuration does respect max-fs and
+ // max-fr in SDP answer.
+ RefPtr<mozilla::MediaPipeline> pipeline =
+ a1_->GetMediaPipeline(1, 0, 1);
+ ASSERT_TRUE(pipeline);
+ mozilla::MediaSessionConduit *conduit = pipeline->Conduit();
+ ASSERT_TRUE(conduit);
+ ASSERT_EQ(conduit->type(), mozilla::MediaSessionConduit::VIDEO);
+ mozilla::VideoSessionConduit *video_conduit =
+ static_cast<mozilla::VideoSessionConduit*>(conduit);
+
+ ASSERT_EQ(video_conduit->SendingMaxFs(), (unsigned short) 600);
+ ASSERT_EQ(video_conduit->SendingMaxFr(), (unsigned short) 60);
+}
+
+// Validate offer with multiple video codecs
+TEST_P(SignalingTest, ValidateMultipleVideoCodecsInOffer)
+{
+ EnsureInit();
+ OfferOptions options;
+
+ a1_->CreateOffer(options, OFFER_AV);
+ std::string offer = a1_->offer();
+
+ // The video m-line must advertise one of the two expected payload-type
+ // orderings (121 present or absent depending on configuration).
+ // BUGFIX: the original wrote ASSERT_NE(find(a) || find(b), npos), which
+ // collapses the two size_t results into a bool (0 or 1) before comparing
+ // against npos, so the assertion could never fail. Check each find()
+ // result explicitly instead.
+#ifdef H264_P0_SUPPORTED
+ ASSERT_TRUE(
+ offer.find("UDP/TLS/RTP/SAVPF 120 126 97") != std::string::npos ||
+ offer.find("UDP/TLS/RTP/SAVPF 120 121 126 97") != std::string::npos);
+#else
+ ASSERT_TRUE(
+ offer.find("UDP/TLS/RTP/SAVPF 120 126") != std::string::npos ||
+ offer.find("UDP/TLS/RTP/SAVPF 120 121 126") != std::string::npos);
+#endif
+ // VP8 (120) and H.264 P1 (126) rtpmap/fmtp/rtcp-fb lines must be present.
+ ASSERT_NE(offer.find("a=rtpmap:120 VP8/90000"), std::string::npos);
+ ASSERT_NE(offer.find("a=rtpmap:126 H264/90000"), std::string::npos);
+ ASSERT_NE(offer.find("a=fmtp:126 profile-level-id="), std::string::npos);
+ ASSERT_NE(offer.find("a=rtcp-fb:120 nack"), std::string::npos);
+ ASSERT_NE(offer.find("a=rtcp-fb:120 nack pli"), std::string::npos);
+ ASSERT_NE(offer.find("a=rtcp-fb:120 ccm fir"), std::string::npos);
+ ASSERT_NE(offer.find("a=rtcp-fb:126 nack"), std::string::npos);
+ ASSERT_NE(offer.find("a=rtcp-fb:126 nack pli"), std::string::npos);
+ ASSERT_NE(offer.find("a=rtcp-fb:126 ccm fir"), std::string::npos);
+#ifdef H264_P0_SUPPORTED
+ // H.264 P0 (97) lines must also be present when supported.
+ ASSERT_NE(offer.find("a=rtpmap:97 H264/90000"), std::string::npos);
+ ASSERT_NE(offer.find("a=fmtp:97 profile-level-id="), std::string::npos);
+ ASSERT_NE(offer.find("a=rtcp-fb:97 nack"), std::string::npos);
+ ASSERT_NE(offer.find("a=rtcp-fb:97 nack pli"), std::string::npos);
+ ASSERT_NE(offer.find("a=rtcp-fb:97 ccm fir"), std::string::npos);
+#endif
+}
+
+// Remove VP8 from offer and check that answer negotiates H264 P1 correctly and ignores unknown params
+TEST_P(SignalingTest, RemoveVP8FromOfferWithP1First)
+{
+ EnsureInit();
+
+ OfferOptions options;
+ size_t match;
+
+ a1_->CreateOffer(options, OFFER_AV);
+
+ // Remove VP8 from offer
+ // Strip payload types 120 and (if present) 121 from the m-line; the
+ // replacement is shorter, so the payload numbers disappear.
+ std::string offer = a1_->offer();
+ match = offer.find("UDP/TLS/RTP/SAVPF 120");
+ if (match != std::string::npos) {
+ offer.replace(match, strlen("UDP/TLS/RTP/SAVPF 120"), "UDP/TLS/RTP/SAVPF");
+ }
+ match = offer.find("UDP/TLS/RTP/SAVPF 121");
+ if (match != std::string::npos) {
+ offer.replace(match, strlen("UDP/TLS/RTP/SAVPF 121"), "UDP/TLS/RTP/SAVPF");
+ }
+ match = offer.find("UDP/TLS/RTP/SAVPF 126");
+ ASSERT_NE(std::string::npos, match);
+
+ // Inject an unknown fmtp parameter (max-foo) in front of
+ // profile-level-id; the answerer must ignore it.
+ match = offer.find("profile-level-id");
+ ASSERT_NE(std::string::npos, match);
+ offer.replace(match,
+ strlen("profile-level-id"),
+ "max-foo=1234;profile-level-id");
+
+ // Drop VP8's rtpmap/rtcp-fb attribute lines from the SDP as well.
+ ParsedSDP sdpWrapper(offer);
+ sdpWrapper.DeleteLines("a=rtcp-fb:120");
+ sdpWrapper.DeleteLine("a=rtpmap:120");
+
+ std::cout << "Modified SDP " << std::endl
+ << indent(sdpWrapper.getSdp()) << std::endl;
+
+ // P1 should be offered first
+ ASSERT_NE(offer.find("UDP/TLS/RTP/SAVPF 126"), std::string::npos);
+
+ a1_->SetLocal(TestObserver::OFFER, sdpWrapper.getSdp());
+ a2_->SetRemote(TestObserver::OFFER, sdpWrapper.getSdp(), false);
+ a2_->CreateAnswer(OFFER_AV|ANSWER_AV);
+
+ std::string answer(a2_->answer());
+
+ // Validate answer SDP
+ ASSERT_NE(answer.find("UDP/TLS/RTP/SAVPF 126"), std::string::npos);
+ ASSERT_NE(answer.find("a=rtpmap:126 H264/90000"), std::string::npos);
+ ASSERT_NE(answer.find("a=rtcp-fb:126 nack"), std::string::npos);
+ ASSERT_NE(answer.find("a=rtcp-fb:126 nack pli"), std::string::npos);
+ ASSERT_NE(answer.find("a=rtcp-fb:126 ccm fir"), std::string::npos);
+ // Ensure VP8 removed
+ ASSERT_EQ(answer.find("a=rtpmap:120 VP8/90000"), std::string::npos);
+ ASSERT_EQ(answer.find("a=rtcp-fb:120"), std::string::npos);
+}
+
+// Insert H.264 before VP8 in Offer, check answer selects H.264
+TEST_P(SignalingTest, OfferWithH264BeforeVP8)
+{
+ EnsureInit();
+
+ OfferOptions options;
+ size_t match;
+
+ a1_->CreateOffer(options, OFFER_AV);
+
+ // Swap VP8 and P1 in offer
+ // NOTE: these replace() calls pass strlen(<new string>) as the erase
+ // count; this only works because the old and new strings have exactly
+ // the same length.
+ std::string offer = a1_->offer();
+#ifdef H264_P0_SUPPORTED
+ match = offer.find("UDP/TLS/RTP/SAVPF 120 126 97");
+ ASSERT_NE(std::string::npos, match);
+ offer.replace(match,
+ strlen("UDP/TLS/RTP/SAVPF 126 120 97"),
+ "UDP/TLS/RTP/SAVPF 126 120 97");
+#else
+ match = offer.find("UDP/TLS/RTP/SAVPF 120 126");
+ ASSERT_NE(std::string::npos, match);
+ offer.replace(match,
+ strlen("UDP/TLS/RTP/SAVPF 126 120"),
+ "UDP/TLS/RTP/SAVPF 126 120");
+#endif
+
+ // Swap the rtpmap lines as well (again relying on equal lengths); the
+ // H264 line found first becomes VP8, then the original VP8 line (now the
+ // first "a=rtpmap:120" match) becomes H264.
+ match = offer.find("a=rtpmap:126 H264/90000");
+ ASSERT_NE(std::string::npos, match);
+ offer.replace(match,
+ strlen("a=rtpmap:120 VP8/90000"),
+ "a=rtpmap:120 VP8/90000");
+
+ match = offer.find("a=rtpmap:120 VP8/90000");
+ ASSERT_NE(std::string::npos, match);
+ offer.replace(match,
+ strlen("a=rtpmap:126 H264/90000"),
+ "a=rtpmap:126 H264/90000");
+
+ std::cout << "Modified SDP " << std::endl
+ << indent(offer) << std::endl;
+
+ // P1 should be offered first
+#ifdef H264_P0_SUPPORTED
+ ASSERT_NE(offer.find("UDP/TLS/RTP/SAVPF 126 120 97"), std::string::npos);
+#else
+ ASSERT_NE(offer.find("UDP/TLS/RTP/SAVPF 126 120"), std::string::npos);
+#endif
+
+ a1_->SetLocal(TestObserver::OFFER, offer);
+ a2_->SetRemote(TestObserver::OFFER, offer, false);
+ a2_->CreateAnswer(OFFER_AV|ANSWER_AV);
+
+ std::string answer(a2_->answer());
+
+ // Validate answer SDP
+ // The answer must select H.264 P1 (126), the first codec offered.
+ ASSERT_NE(answer.find("UDP/TLS/RTP/SAVPF 126"), std::string::npos);
+ ASSERT_NE(answer.find("a=rtpmap:126 H264/90000"), std::string::npos);
+ ASSERT_NE(answer.find("a=rtcp-fb:126 nack"), std::string::npos);
+ ASSERT_NE(answer.find("a=rtcp-fb:126 nack pli"), std::string::npos);
+ ASSERT_NE(answer.find("a=rtcp-fb:126 ccm fir"), std::string::npos);
+}
+
+#ifdef H264_P0_SUPPORTED
+// Remove H.264 P1 and VP8 from offer, check answer negotiates H.264 P0
+TEST_P(SignalingTest, OfferWithOnlyH264P0)
+{
+ EnsureInit();
+
+ OfferOptions options;
+ size_t match;
+
+ a1_->CreateOffer(options, OFFER_AV);
+
+ // Remove VP8 from offer
+ // Strip payload types 120 (VP8) and 126 (H.264 P1) from the m-line,
+ // leaving only 97 (H.264 P0).
+ std::string offer = a1_->offer();
+ match = offer.find("UDP/TLS/RTP/SAVPF 120 126");
+ ASSERT_NE(std::string::npos, match);
+ offer.replace(match,
+ strlen("UDP/TLS/RTP/SAVPF 120 126"),
+ "UDP/TLS/RTP/SAVPF");
+
+ // Also drop all attribute lines for payload types 120 and 126.
+ ParsedSDP sdpWrapper(offer);
+ sdpWrapper.DeleteLines("a=rtcp-fb:120");
+ sdpWrapper.DeleteLine("a=rtpmap:120");
+ sdpWrapper.DeleteLines("a=rtcp-fb:126");
+ sdpWrapper.DeleteLine("a=rtpmap:126");
+ sdpWrapper.DeleteLine("a=fmtp:126");
+
+ std::cout << "Modified SDP " << std::endl
+ << indent(sdpWrapper.getSdp()) << std::endl;
+
+ // Offer shouldn't have P1 or VP8 now
+ offer = sdpWrapper.getSdp();
+ ASSERT_EQ(offer.find("a=rtpmap:126 H264/90000"), std::string::npos);
+ ASSERT_EQ(offer.find("a=rtpmap:120 VP8/90000"), std::string::npos);
+
+ // P0 should be offered first
+ ASSERT_NE(offer.find("UDP/TLS/RTP/SAVPF 97"), std::string::npos);
+
+ a1_->SetLocal(TestObserver::OFFER, offer);
+ a2_->SetRemote(TestObserver::OFFER, offer, false);
+ a2_->CreateAnswer(OFFER_AV|ANSWER_AV);
+
+ std::string answer(a2_->answer());
+
+ // validate answer SDP
+ // The answer must negotiate H.264 P0 (97), the only remaining codec.
+ ASSERT_NE(answer.find("UDP/TLS/RTP/SAVPF 97"), std::string::npos);
+ ASSERT_NE(answer.find("a=rtpmap:97 H264/90000"), std::string::npos);
+ ASSERT_NE(answer.find("a=rtcp-fb:97 nack"), std::string::npos);
+ ASSERT_NE(answer.find("a=rtcp-fb:97 nack pli"), std::string::npos);
+ ASSERT_NE(answer.find("a=rtcp-fb:97 ccm fir"), std::string::npos);
+ // Ensure VP8 and P1 removed
+ ASSERT_EQ(answer.find("a=rtpmap:126 H264/90000"), std::string::npos);
+ ASSERT_EQ(answer.find("a=rtpmap:120 VP8/90000"), std::string::npos);
+ ASSERT_EQ(answer.find("a=rtcp-fb:120"), std::string::npos);
+ ASSERT_EQ(answer.find("a=rtcp-fb:126"), std::string::npos);
+}
+#endif
+
+// Test negotiating an answer which has only H.264 P1
+// Which means replace VP8 with H.264 P1 in answer
+TEST_P(SignalingTest, AnswerWithoutVP8)
+{
+ EnsureInit();
+
+ OfferOptions options;
+
+ a1_->CreateOffer(options, OFFER_AV);
+ a1_->SetLocal(TestObserver::OFFER, a1_->offer());
+ a2_->SetRemote(TestObserver::OFFER, a1_->offer(), false);
+ a2_->CreateAnswer(OFFER_AV|ANSWER_AV);
+
+ std::string answer(a2_->answer());
+
+ // Ensure answer has VP8
+ ASSERT_NE(answer.find("\r\na=rtpmap:120 VP8/90000"), std::string::npos);
+
+ // Replace VP8 with H.264 P1
+ // First add the H.264 fmtp line, then rewrite every 120 reference to 126.
+ // NOTE: the replace() calls below pass strlen(<new string>) as the erase
+ // count; this only works because "120" and "126" have the same length.
+ ParsedSDP sdpWrapper(a2_->answer());
+ sdpWrapper.AddLine("a=fmtp:126 profile-level-id=42e00c;level-asymmetry-allowed=1;packetization-mode=1\r\n");
+ size_t match;
+ answer = sdpWrapper.getSdp();
+
+ match = answer.find("UDP/TLS/RTP/SAVPF 120");
+ ASSERT_NE(std::string::npos, match);
+ answer.replace(match,
+ strlen("UDP/TLS/RTP/SAVPF 120"),
+ "UDP/TLS/RTP/SAVPF 126");
+
+ match = answer.find("\r\na=rtpmap:120 VP8/90000");
+ ASSERT_NE(std::string::npos, match);
+ answer.replace(match,
+ strlen("\r\na=rtpmap:126 H264/90000"),
+ "\r\na=rtpmap:126 H264/90000");
+
+ match = answer.find("\r\na=rtcp-fb:120 nack");
+ ASSERT_NE(std::string::npos, match);
+ answer.replace(match,
+ strlen("\r\na=rtcp-fb:126 nack"),
+ "\r\na=rtcp-fb:126 nack");
+
+ match = answer.find("\r\na=rtcp-fb:120 nack pli");
+ ASSERT_NE(std::string::npos, match);
+ answer.replace(match,
+ strlen("\r\na=rtcp-fb:126 nack pli"),
+ "\r\na=rtcp-fb:126 nack pli");
+
+ match = answer.find("\r\na=rtcp-fb:120 ccm fir");
+ ASSERT_NE(std::string::npos, match);
+ answer.replace(match,
+ strlen("\r\na=rtcp-fb:126 ccm fir"),
+ "\r\na=rtcp-fb:126 ccm fir");
+
+ std::cout << "Modified SDP " << std::endl << indent(answer) << std::endl;
+
+ // Both sides must accept the rewritten (H.264-only) answer cleanly.
+ a2_->SetLocal(TestObserver::ANSWER, answer, false);
+
+ ASSERT_EQ(a2_->pObserver->lastStatusCode,
+ PeerConnectionImpl::kNoError);
+
+ a1_->SetRemote(TestObserver::ANSWER, answer, false);
+
+ ASSERT_EQ(a1_->pObserver->lastStatusCode,
+ PeerConnectionImpl::kNoError);
+
+ WaitForCompleted();
+
+ // We cannot check pipelines/streams since the H264 stuff won't init.
+
+ CloseStreams();
+}
+
+// Test using a non preferred dynamic video payload type on answer negotiation
+// (NOTE(review): "Preffered" in the test name is a typo, but renaming would
+// break existing test filters/automation, so it is left as-is.)
+TEST_P(SignalingTest, UseNonPrefferedPayloadTypeOnAnswer)
+{
+ EnsureInit();
+
+ OfferOptions options;
+ a1_->CreateOffer(options, OFFER_AV);
+ a1_->SetLocal(TestObserver::OFFER, a1_->offer());
+ a2_->SetRemote(TestObserver::OFFER, a1_->offer(), false);
+ a2_->CreateAnswer(OFFER_AV|ANSWER_AV);
+
+ std::string answer(a2_->answer());
+
+ // Ensure answer has VP8
+ ASSERT_NE(answer.find("\r\na=rtpmap:120 VP8/90000"), std::string::npos);
+
+ // Replace VP8 Payload Type with a non preferred value
+ // (rewrite 120 -> 121 everywhere; the replace() calls rely on the old and
+ // new strings having identical length).
+ size_t match;
+ match = answer.find("UDP/TLS/RTP/SAVPF 120");
+ ASSERT_NE(std::string::npos, match);
+ answer.replace(match,
+ strlen("UDP/TLS/RTP/SAVPF 121"),
+ "UDP/TLS/RTP/SAVPF 121");
+
+ match = answer.find("\r\na=rtpmap:120 VP8/90000");
+ ASSERT_NE(std::string::npos, match);
+ answer.replace(match,
+ strlen("\r\na=rtpmap:121 VP8/90000"),
+ "\r\na=rtpmap:121 VP8/90000");
+
+ match = answer.find("\r\na=rtcp-fb:120 nack");
+ ASSERT_NE(std::string::npos, match);
+ answer.replace(match,
+ strlen("\r\na=rtcp-fb:121 nack"),
+ "\r\na=rtcp-fb:121 nack");
+
+ match = answer.find("\r\na=rtcp-fb:120 nack pli");
+ ASSERT_NE(std::string::npos, match);
+ answer.replace(match,
+ strlen("\r\na=rtcp-fb:121 nack pli"),
+ "\r\na=rtcp-fb:121 nack pli");
+
+ match = answer.find("\r\na=rtcp-fb:120 ccm fir");
+ ASSERT_NE(std::string::npos, match);
+ answer.replace(match,
+ strlen("\r\na=rtcp-fb:121 ccm fir"),
+ "\r\na=rtcp-fb:121 ccm fir");
+
+ std::cout << "Modified SDP " << std::endl
+ << indent(answer) << std::endl;
+
+ // Negotiation must still succeed with the non-preferred payload type.
+ a2_->SetLocal(TestObserver::ANSWER, answer, false);
+
+ ASSERT_EQ(a2_->pObserver->lastStatusCode,
+ PeerConnectionImpl::kNoError);
+
+ a1_->SetRemote(TestObserver::ANSWER, answer, false);
+
+ ASSERT_EQ(a1_->pObserver->lastStatusCode,
+ PeerConnectionImpl::kNoError);
+
+ WaitForCompleted();
+
+ CheckPipelines();
+ CheckStreams();
+
+ CloseStreams();
+}
+
+// Offer only video codecs the answerer cannot match (VP9/VP10/H265 rtpmaps
+// replace the real ones) and verify the session still completes audio-only.
+TEST_P(SignalingTest, VideoNegotiationFails)
+{
+ EnsureInit();
+
+ OfferOptions options;
+
+ a1_->CreateOffer(options, OFFER_AV);
+ a1_->SetLocal(TestObserver::OFFER, a1_->offer());
+
+ // Replace every video rtpmap/rtcp-fb with codec names this build does not
+ // negotiate, so the video m-section cannot be agreed on.
+ ParsedSDP parsedOffer(a1_->offer());
+ parsedOffer.DeleteLines("a=rtcp-fb:120");
+ parsedOffer.DeleteLines("a=rtcp-fb:126");
+ parsedOffer.DeleteLines("a=rtcp-fb:97");
+ parsedOffer.DeleteLines("a=rtpmap:120");
+ parsedOffer.DeleteLines("a=rtpmap:126");
+ parsedOffer.DeleteLines("a=rtpmap:97");
+ parsedOffer.AddLine("a=rtpmap:120 VP9/90000\r\n");
+ parsedOffer.AddLine("a=rtpmap:126 VP10/90000\r\n");
+ parsedOffer.AddLine("a=rtpmap:97 H265/90000\r\n");
+
+ std::cout << "Modified offer: " << std::endl << parsedOffer.getSdp()
+ << std::endl;
+
+ a2_->SetRemote(TestObserver::OFFER, parsedOffer.getSdp(), false);
+ a2_->CreateAnswer(OFFER_AV|ANSWER_AUDIO);
+
+ a2_->SetLocal(TestObserver::ANSWER, a2_->answer(), false);
+
+ ASSERT_EQ(a2_->pObserver->lastStatusCode,
+ PeerConnectionImpl::kNoError);
+
+ a1_->SetRemote(TestObserver::ANSWER, a2_->answer(), false);
+
+ ASSERT_EQ(a1_->pObserver->lastStatusCode,
+ PeerConnectionImpl::kNoError);
+
+ // Both sides should end up without video tracks.
+ a1_->ExpectMissingTracks(SdpMediaSection::kVideo);
+ a2_->ExpectMissingTracks(SdpMediaSection::kVideo);
+
+ WaitForCompleted();
+
+ CheckPipelines();
+ // TODO: (bug 1140089) a2 is not seeing audio segments in this test.
+ // CheckStreams();
+
+ CloseStreams();
+}
+
+// Offer only audio codecs the answerer cannot match (G728/G729/GSM/LPC
+// rtpmaps replace the real ones) and verify the session completes video-only.
+TEST_P(SignalingTest, AudioNegotiationFails)
+{
+ EnsureInit();
+
+ OfferOptions options;
+
+ a1_->CreateOffer(options, OFFER_AV);
+ a1_->SetLocal(TestObserver::OFFER, a1_->offer());
+
+ // Rewrite every audio rtpmap to a codec name that presumably is not
+ // supported, so the audio m-section cannot be negotiated.
+ ParsedSDP parsedOffer(a1_->offer());
+ parsedOffer.ReplaceLine("a=rtpmap:0", "a=rtpmap:0 G728/8000");
+ parsedOffer.ReplaceLine("a=rtpmap:8", "a=rtpmap:8 G729/8000");
+ parsedOffer.ReplaceLine("a=rtpmap:9", "a=rtpmap:9 GSM/8000");
+ parsedOffer.ReplaceLine("a=rtpmap:109", "a=rtpmap:109 LPC/8000");
+
+ a2_->SetRemote(TestObserver::OFFER, parsedOffer.getSdp(), false);
+ a2_->CreateAnswer(OFFER_AV|ANSWER_VIDEO);
+
+ a2_->SetLocal(TestObserver::ANSWER, a2_->answer(), false);
+
+ ASSERT_EQ(a2_->pObserver->lastStatusCode,
+ PeerConnectionImpl::kNoError);
+
+ a1_->SetRemote(TestObserver::ANSWER, a2_->answer(), false);
+
+ ASSERT_EQ(a1_->pObserver->lastStatusCode,
+ PeerConnectionImpl::kNoError);
+
+ // Both sides should end up without audio tracks.
+ a1_->ExpectMissingTracks(SdpMediaSection::kAudio);
+ a2_->ExpectMissingTracks(SdpMediaSection::kAudio);
+
+ WaitForCompleted();
+
+ CheckPipelines();
+ CheckStreams();
+
+ CloseStreams();
+}
+
+// With mid-based correlation sabotaged, bundled streams must still be
+// correlated by SSRC.
+TEST_P(SignalingTest, BundleStreamCorrelationBySsrc)
+{
+ // Only meaningful for bundle-enabled parameterizations.
+ if (!UseBundle()) {
+ return;
+ }
+
+ EnsureInit();
+
+ // Two audio streams so correlation actually has to disambiguate.
+ a1_->AddStream(DOMMediaStream::HINT_CONTENTS_AUDIO);
+ a1_->AddStream(DOMMediaStream::HINT_CONTENTS_AUDIO);
+
+ OfferOptions options;
+
+ a1_->CreateOffer(options, OFFER_NONE);
+ ParsedSDP parsedOffer(a1_->offer());
+
+ // Sabotage mid-based matching
+ // Corrupt the sdes:mid header-extension URN so mid correlation cannot
+ // be used, forcing the SSRC path.
+ std::string modifiedOffer = parsedOffer.getSdp();
+ size_t midExtStart =
+ modifiedOffer.find("urn:ietf:params:rtp-hdrext:sdes:mid");
+ if (midExtStart != std::string::npos) {
+ // Just garble it a little
+ modifiedOffer[midExtStart] = 'q';
+ }
+
+ a1_->SetLocal(TestObserver::OFFER, modifiedOffer);
+
+ a2_->SetRemote(TestObserver::OFFER, modifiedOffer, false);
+ a2_->CreateAnswer(ANSWER_AUDIO);
+
+ a2_->SetLocal(TestObserver::ANSWER, a2_->answer(), false);
+
+ ASSERT_EQ(a2_->pObserver->lastStatusCode,
+ PeerConnectionImpl::kNoError);
+
+ a1_->SetRemote(TestObserver::ANSWER, a2_->answer(), false);
+
+ ASSERT_EQ(a1_->pObserver->lastStatusCode,
+ PeerConnectionImpl::kNoError);
+
+ WaitForCompleted();
+
+ CheckPipelines();
+ CheckStreams();
+
+ CloseStreams();
+}
+
+// With both SSRC and mid correlation sabotaged, bundled streams must still be
+// correlated by unique payload type.
+TEST_P(SignalingTest, BundleStreamCorrelationByUniquePt)
+{
+ // Only meaningful for bundle-enabled parameterizations.
+ if (!UseBundle()) {
+ return;
+ }
+
+ EnsureInit();
+
+ OfferOptions options;
+
+ a1_->CreateOffer(options, OFFER_AV);
+ ParsedSDP parsedOffer(a1_->offer());
+
+ std::string modifiedOffer = parsedOffer.getSdp();
+ // Sabotage ssrc matching
+ // Corrupt the first "a=ssrc:" attribute name so SSRC correlation fails.
+ size_t ssrcStart =
+ modifiedOffer.find("a=ssrc:");
+ ASSERT_NE(std::string::npos, ssrcStart);
+ // Garble
+ modifiedOffer[ssrcStart+2] = 'q';
+
+ // Sabotage mid-based matching
+ // Corrupt the sdes:mid header-extension URN as well, leaving payload-type
+ // correlation as the only option.
+ size_t midExtStart =
+ modifiedOffer.find("urn:ietf:params:rtp-hdrext:sdes:mid");
+ if (midExtStart != std::string::npos) {
+ // Just garble it a little
+ modifiedOffer[midExtStart] = 'q';
+ }
+
+ a1_->SetLocal(TestObserver::OFFER, modifiedOffer);
+
+ a2_->SetRemote(TestObserver::OFFER, modifiedOffer, false);
+ a2_->CreateAnswer(OFFER_AV|ANSWER_AV);
+
+ a2_->SetLocal(TestObserver::ANSWER, a2_->answer(), false);
+
+ ASSERT_EQ(a2_->pObserver->lastStatusCode,
+ PeerConnectionImpl::kNoError);
+
+ a1_->SetRemote(TestObserver::ANSWER, a2_->answer(), false);
+
+ ASSERT_EQ(a1_->pObserver->lastStatusCode,
+ PeerConnectionImpl::kNoError);
+
+ WaitForCompleted();
+
+ CheckPipelines();
+ CheckStreams();
+
+ CloseStreams();
+}
+
+// Run the whole suite once per bundle-policy variant; the string parameter
+// is what UseBundle()/the fixture reads to configure each run.
+INSTANTIATE_TEST_CASE_P(Variants, SignalingTest,
+ ::testing::Values("max-bundle",
+ "balanced",
+ "max-compat",
+ "no_bundle",
+ "reject_bundle"));
+
+} // End namespace test.
+
+// Returns true when |terminal| (typically the $TERM value) exactly matches
+// one of a small allowlist of terminals known to support ANSI color.
+// A null pointer is treated as "no color".
+bool is_color_terminal(const char *terminal) {
+ if (!terminal) {
+ return false;
+ }
+ // Null-terminated allowlist; exact matches only (e.g. "screen-256color"
+ // is NOT matched).
+ const char *color_terms[] = {
+ "xterm",
+ "xterm-color",
+ "xterm-256color",
+ "screen",
+ "linux",
+ "cygwin",
+ 0
+ };
+ const char **p = color_terms;
+ while (*p) {
+ if (!strcmp(terminal, *p)) {
+ return true;
+ }
+ p++;
+ }
+ return false;
+}
+
+// Returns the value of environment variable |name|, or the empty string when
+// it is unset (callers can't distinguish unset from set-but-empty).
+static std::string get_environment(const char *name) {
+ char *value = getenv(name);
+
+ if (!value)
+ return "";
+
+ return value;
+}
+
+// This exists to send as an event to trigger shutdown.
+// Dispatched to the main thread by gtest_main(); setting the flag lets the
+// main-thread event loop in main() exit.
+static void tests_complete() {
+ gTestsComplete = true;
+}
+
+// The GTest thread runs this instead of the main thread so it can
+// do things like ASSERT_TRUE_WAIT which you could not do on the main thread.
+// Returns the RUN_ALL_TESTS() exit code.
+static int gtest_main(int argc, char **argv) {
+ MOZ_ASSERT(!NS_IsMainThread());
+
+ ::testing::InitGoogleTest(&argc, argv);
+
+ // "-t" bumps the global wait timeout (useful under debuggers/slow bots).
+ for(int i=0; i<argc; i++) {
+ if (!strcmp(argv[i],"-t")) {
+ kDefaultTimeout = 20000;
+ }
+ }
+
+ ::testing::AddGlobalTestEnvironment(new test::SignalingEnvironment);
+ int result = RUN_ALL_TESTS();
+
+ // Tear down the STUN server on the STS thread before signaling completion.
+ test_utils->sts_target()->Dispatch(
+ WrapRunnableNM(&TestStunServer::ShutdownInstance), NS_DISPATCH_SYNC);
+
+ // Set the global shutdown flag and tickle the main thread
+ // The main thread did not go through Init() so calling Shutdown()
+ // on it will not work.
+ gMainThread->Dispatch(WrapRunnableNM(tests_complete), NS_DISPATCH_SYNC);
+
+ return result;
+}
+
+#ifdef SIGNALING_UNITTEST_STANDALONE
+// Crash at startup if the binding-generated string table and our local copy
+// have diverged. Iteration stops at the bindings table's null terminator;
+// NOTE(review): assumes |ourTable| has at least as many entries as
+// |bindingTable| — a shorter ourTable would pass a null to strcmp (UB).
+static void verifyStringTable(const EnumEntry* bindingTable,
+ const char** ourTable)
+{
+ while (bindingTable->value) {
+ if (strcmp(bindingTable->value, *ourTable)) {
+ MOZ_CRASH("Our tables are out of sync with the bindings");
+ }
+ ++bindingTable;
+ ++ourTable;
+ }
+}
+#endif // SIGNALING_UNITTEST_STANDALONE
+
+// Process entry point: sets up NSS, XPCOM threads and the STUN test server,
+// runs the gtest suite on a dedicated "gtest_thread" (see gtest_main), and
+// pumps the main-thread event queue until the suite signals completion.
+int main(int argc, char **argv) {
+
+ // This test can cause intermittent oranges on the builders
+ CHECK_ENVIRONMENT_FLAG("MOZ_WEBRTC_TESTS")
+
+ // Colorize the two agent names in logs when stdout is a color terminal.
+ if (isatty(STDOUT_FILENO) && is_color_terminal(getenv("TERM"))) {
+ std::string ansiMagenta = "\x1b[35m";
+ std::string ansiCyan = "\x1b[36m";
+ std::string ansiColorOff = "\x1b[0m";
+ callerName = ansiCyan + callerName + ansiColorOff;
+ calleeName = ansiMagenta + calleeName + ansiColorOff;
+ }
+
+#ifdef SIGNALING_UNITTEST_STANDALONE
+ // Verify our string tables are correct.
+ verifyStringTable(PCImplSignalingStateValues::strings,
+ test::PCImplSignalingStateStrings);
+ verifyStringTable(PCImplIceConnectionStateValues::strings,
+ test::PCImplIceConnectionStateStrings);
+ verifyStringTable(PCImplIceGatheringStateValues::strings,
+ test::PCImplIceGatheringStateStrings);
+#endif // SIGNALING_UNITTEST_STANDALONE
+
+ // Optional overrides for the STUN server used by the tests.
+ std::string tmp = get_environment("STUN_SERVER_ADDRESS");
+ if (tmp != "")
+ g_stun_server_address = tmp;
+
+ tmp = get_environment("STUN_SERVER_PORT");
+ if (tmp != "")
+ g_stun_server_port = atoi(tmp.c_str());
+
+ test_utils = new MtransportTestUtils();
+ NSS_NoDB_Init(nullptr);
+ NSS_SetDomesticPolicy();
+
+ ::testing::TestEventListeners& listeners =
+ ::testing::UnitTest::GetInstance()->listeners();
+ // Adds a listener to the end. Google Test takes the ownership.
+ listeners.Append(new test::RingbufferDumper(test_utils));
+ test_utils->sts_target()->Dispatch(
+ WrapRunnableNM(&TestStunServer::GetInstance, AF_INET), NS_DISPATCH_SYNC);
+
+ // Set the main thread global which is this thread.
+ // NOTE(review): NS_GetMainThread addrefs |thread| and it is never
+ // explicitly released; presumably acceptable for a test binary's lifetime.
+ nsIThread *thread;
+ NS_GetMainThread(&thread);
+ gMainThread = thread;
+ MOZ_ASSERT(NS_IsMainThread());
+
+ // Now create the GTest thread and run all of the tests on it
+ // When it is complete it will set gTestsComplete
+ NS_NewNamedThread("gtest_thread", &thread);
+ gGtestThread = thread;
+
+ // |result| is written by gtest_main on the gtest thread before it sets
+ // gTestsComplete, so the read below happens-after the write.
+ int result;
+ gGtestThread->Dispatch(
+ WrapRunnableNMRet(&result, gtest_main, argc, argv), NS_DISPATCH_NORMAL);
+
+ // Here we handle the event queue for dispatches to the main thread
+ // When the GTest thread is complete it will send one more dispatch
+ // with gTestsComplete == true.
+ while (!gTestsComplete && NS_ProcessNextEvent());
+
+ gGtestThread->Shutdown();
+
+ PeerConnectionCtx::Destroy();
+ delete test_utils;
+
+ return result;
+}